diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
index aaa3cb42f9c..d7e0e213037 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
@@ -76,6 +76,7 @@ jobs:
go-version: 1.22.5
- name: Setup github.com/slackhq/vitess-additions access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
@@ -104,9 +105,9 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
+ sudo percona-release enable-only tools
sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-80
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
index b497d4bbea0..326233a710f 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
@@ -100,9 +100,9 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
+ sudo percona-release enable-only tools
sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-80
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
index ab5021de227..0c006625510 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
@@ -80,6 +80,7 @@ jobs:
go-version: 1.22.5
- name: Setup github.com/slackhq/vitess-additions access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
@@ -126,9 +127,9 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
+ sudo percona-release enable-only tools
sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-80
# Checkout to the last release of Vitess
- name: Checkout to the other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
index 819c1f8c0c9..7a14447608a 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
@@ -121,9 +121,9 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
+ sudo percona-release enable-only tools
sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-80
# Checkout to the next release of Vitess
- name: Checkout to the other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
index 6eb117d1a2b..93daba76d6d 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
@@ -79,6 +79,7 @@ jobs:
go-version: 1.22.5
- name: Setup github.com/slackhq/vitess-additions access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
index 73eae4acaac..af0084315a9 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
@@ -79,6 +79,7 @@ jobs:
go-version: 1.22.5
- name: Setup github.com/slackhq/vitess-additions access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
index e6b61526419..f67a3214d24 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
@@ -118,9 +118,9 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
+ sudo percona-release enable-only tools
sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-80
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
index 8d5ee34b85e..b05631a4862 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
@@ -79,6 +79,7 @@ jobs:
go-version: 1.22.5
- name: Setup github.com/slackhq/vitess-additions access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
index 5ce2f6ac521..372f223f06a 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
@@ -79,6 +79,7 @@ jobs:
go-version: 1.22.5
- name: Setup github.com/slackhq/vitess-additions access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
diff --git a/.github/workflows/vitess_tester_vtgate.yml b/.github/workflows/vitess_tester_vtgate.yml
new file mode 100644
index 00000000000..63d7332fc39
--- /dev/null
+++ b/.github/workflows/vitess_tester_vtgate.yml
@@ -0,0 +1,178 @@
+# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
+
+name: Vitess Tester (vtgate)
+on: [push, pull_request]
+concurrency:
+ group: format('{0}-{1}', ${{ github.ref }}, 'Vitess Tester (vtgate)')
+ cancel-in-progress: true
+
+permissions: read-all
+
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+ GOPRIVATE: github.com/slackhq/vitess-addons
+  GH_ACCESS_TOKEN: "${{ secrets.GH_ACCESS_TOKEN }}"
+
+jobs:
+ build:
+ name: Run endtoend tests on Vitess Tester (vtgate)
+ runs-on:
+ group: vitess-ubuntu20
+
+ steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+ skip='true'
+ fi
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ PR_DATA=$(curl -s\
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v4
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: dorny/paths-filter@v3.0.1
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'go/vt/sidecardb/**/*.sql'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/**'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/vitess_tester_vtgate.yml'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v5
+ with:
+ go-version: 1.22.5
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v5
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get -qq update
+ # Install everything else we need, and configure
+ sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
+ sudo service mysql stop
+ sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ # install vitess tester
+ go install github.com/vitessio/vitess-tester@eb953122baba163ed8ccaa6642458ee984f5d7e4
+
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ timeout-minutes: 45
+ run: |
+        # We set VTDATAROOT to the /tmp folder to shorten the path of the mysql.sock file,
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+ make build
+
+ set -exo pipefail
+
+ i=1
+ for dir in ./go/test/endtoend/vtgate/vitess_tester/*/; do
+ # We go over all the directories in the given path.
+        # If there is a vschema file there, we use it; otherwise we let vitess-tester autogenerate it.
+ if [ -f $dir/vschema.json ]; then
+ vitess-tester --sharded --xunit --test-dir $dir --vschema "$dir"vschema.json
+ else
+ vitess-tester --sharded --xunit --test-dir $dir
+ fi
+ # Number the reports by changing their file names.
+ mv report.xml report"$i".xml
+ i=$((i+1))
+ done
+
+ - name: Print test output and Record test result in launchable if PR is not a draft
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ run: |
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
+
+ # print test output
+ cat report*.xml
+
+ - name: Test Summary
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ uses: test-summary/action@v2
+ with:
+ paths: "report*.xml"
+ show: "fail, skip"
diff --git a/.gitignore b/.gitignore
index 881e89890cc..1676eb498d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -86,3 +86,4 @@ report
# plan test output
/go/vt/vtgate/planbuilder/testdata/plan_test*
+/go/vt/vtgate/planbuilder/testdata/expected
diff --git a/changelog/19.0/19.0.0/release_notes.md b/changelog/19.0/19.0.0/release_notes.md
index 98603d4240a..0a3f33f0fd9 100644
--- a/changelog/19.0/19.0.0/release_notes.md
+++ b/changelog/19.0/19.0.0/release_notes.md
@@ -53,6 +53,10 @@ Vitess will however, continue to support importing from MySQL 5.7 into Vitess ev
#### Docker Image vitess/lite
+> [!CAUTION]
+> If you are using incremental backups, you must remain on the `vitess/lite` image, as the official MySQL image does not have `mysqlbinlog` installed.
+> See https://github.com/vitessio/vitess/issues/16281 for more information.
+
The `mysqld` binary is now deprecated in the `vitess/lite` Docker image and will be removed in a future release.
This means that the MySQL/Percona version specific image tags for the `vitess/lite` image are deprecated.
diff --git a/changelog/19.0/19.0.5/changelog.md b/changelog/19.0/19.0.5/changelog.md
new file mode 100644
index 00000000000..4d0a520059d
--- /dev/null
+++ b/changelog/19.0/19.0.5/changelog.md
@@ -0,0 +1,71 @@
+# Changelog of Vitess v19.0.5
+
+### Bug fixes
+#### Cluster management
+ * [release-19.0] Use default schema reload config values when config file is empty (#16393) [#16410](https://github.com/vitessio/vitess/pull/16410)
+#### Docker
+ * [release-19.0] Fix the install dependencies script in Docker (#16340) [#16346](https://github.com/vitessio/vitess/pull/16346)
+#### Documentation
+ * [release-19.0] Fix the `v19.0.0` release notes and use the `vitess/lite` image for the MySQL container (#16282) [#16285](https://github.com/vitessio/vitess/pull/16285)
+#### Online DDL
+ * [release-19.0] Online DDL shadow table: rename referenced table name in self referencing FK (#16205) [#16207](https://github.com/vitessio/vitess/pull/16207)
+#### Query Serving
+ * [release-19.0] fix: handle info_schema routing (#15899) [#15906](https://github.com/vitessio/vitess/pull/15906)
+ * [release-19.0] connpool: Allow time out during shutdown (#15979) [#16003](https://github.com/vitessio/vitess/pull/16003)
+ * [release-19.0] fix: remove keyspace when merging subqueries (#16019) [#16027](https://github.com/vitessio/vitess/pull/16027)
+ * [release-19.0] Handle Nullability for Columns from Outer Tables (#16174) [#16185](https://github.com/vitessio/vitess/pull/16185)
+ * [release-19.0] Fix vtgate crash in group concat [#16254](https://github.com/vitessio/vitess/pull/16254)
+ * [release-19.0] Fix Incorrect Optimization with LIMIT and GROUP BY (#16263) [#16267](https://github.com/vitessio/vitess/pull/16267)
+ * [release-19.0] planner: Handle ORDER BY inside derived tables (#16353) [#16359](https://github.com/vitessio/vitess/pull/16359)
+ * [release-19.0] fix issue with aggregation inside of derived tables (#16366) [#16384](https://github.com/vitessio/vitess/pull/16384)
+ * [release-19.0] Fix Join Predicate Cleanup Bug in Route Merging (#16386) [#16389](https://github.com/vitessio/vitess/pull/16389)
+ * [release-19.0] Fix panic in schema tracker in presence of keyspace routing rules (#16383) [#16406](https://github.com/vitessio/vitess/pull/16406)
+ * [release-19.0] Fix subquery planning having an aggregation that is used in order by as long as we can merge it all into a single route (#16402) [#16407](https://github.com/vitessio/vitess/pull/16407)
+#### VReplication
+ * [release-19.0] vtctldclient: Apply (Shard | Keyspace| Table) Routing Rules commands don't work (#16096) [#16124](https://github.com/vitessio/vitess/pull/16124)
+ * [release-19.0] VDiff CLI: Fix VDiff `show` bug (#16177) [#16198](https://github.com/vitessio/vitess/pull/16198)
+ * [release-19.0] VReplication Workflow: set state correctly when restarting workflow streams in the copy phase (#16217) [#16222](https://github.com/vitessio/vitess/pull/16222)
+ * [release-19.0] VReplication: Properly handle target shards w/o a primary in Reshard (#16283) [#16291](https://github.com/vitessio/vitess/pull/16291)
+#### VTorc
+ * [release-19.0] Add timeout to all the contexts used for RPC calls in vtorc (#15991) [#16103](https://github.com/vitessio/vitess/pull/16103)
+#### vtexplain
+ * [release-19.0] Fix `vtexplain` not handling `UNION` queries with `weight_string` results correctly. (#16129) [#16157](https://github.com/vitessio/vitess/pull/16157)
+### CI/Build
+#### Build/CI
+ * [release-19.0] Add DCO workflow (#16052) [#16056](https://github.com/vitessio/vitess/pull/16056)
+ * [release-19.0] Remove DCO workaround (#16087) [#16091](https://github.com/vitessio/vitess/pull/16091)
+ * [release-19.0] CI: Fix for xtrabackup install failures (#16329) [#16332](https://github.com/vitessio/vitess/pull/16332)
+#### General
+ * [release-19.0] Upgrade the Golang version to `go1.22.4` [#16061](https://github.com/vitessio/vitess/pull/16061)
+ * [release-19.0] Upgrade the Golang version to `go1.22.5` [#16322](https://github.com/vitessio/vitess/pull/16322)
+#### VTAdmin
+ * [release-19.0] Update VTAdmin build script (#15839) [#15850](https://github.com/vitessio/vitess/pull/15850)
+### Dependencies
+#### VTAdmin
+ * [release-19.0] Update braces package (#16115) [#16118](https://github.com/vitessio/vitess/pull/16118)
+### Internal Cleanup
+#### Examples
+ * [release-19.0] Update env.sh so that is does not error when running on Mac (#15835) [#15915](https://github.com/vitessio/vitess/pull/15915)
+### Performance
+#### VTTablet
+ * [release-19.0] Do not load table stats when booting `vttablet`. (#15715) [#16100](https://github.com/vitessio/vitess/pull/16100)
+### Regression
+#### Query Serving
+ * [release-19.0] fix: derived table join column expression to be part of add join predicate on rewrite (#15956) [#15960](https://github.com/vitessio/vitess/pull/15960)
+ * [release-19.0] fix: insert on duplicate update to add list argument in the bind variables map (#15961) [#15967](https://github.com/vitessio/vitess/pull/15967)
+ * [release-19.0] fix: order by subquery planning (#16049) [#16132](https://github.com/vitessio/vitess/pull/16132)
+ * [release-19.0] feat: add a LIMIT 1 on EXISTS subqueries to limit network overhead (#16153) [#16191](https://github.com/vitessio/vitess/pull/16191)
+### Release
+#### General
+ * [release-19.0] Bump to `v19.0.5-SNAPSHOT` after the `v19.0.4` release [#15889](https://github.com/vitessio/vitess/pull/15889)
+### Testing
+#### Build/CI
+ * Run more test on release-19 branch [#16152](https://github.com/vitessio/vitess/pull/16152)
+#### Query Serving
+ * [release-19.0] test: Cleaner plan tests output (#15922) [#15964](https://github.com/vitessio/vitess/pull/15964)
+ * [release-19] Vitess tester workflow (#16127) [#16418](https://github.com/vitessio/vitess/pull/16418)
+#### VTCombo
+ * [release-19.0] Fix flaky tests that use vtcombo (#16178) [#16212](https://github.com/vitessio/vitess/pull/16212)
+#### vtexplain
+ * [release-19.0] Fix flakiness in `vtexplain` unit test case. (#16159) [#16167](https://github.com/vitessio/vitess/pull/16167)
+
diff --git a/changelog/19.0/19.0.5/release_notes.md b/changelog/19.0/19.0.5/release_notes.md
new file mode 100644
index 00000000000..5df66930dd1
--- /dev/null
+++ b/changelog/19.0/19.0.5/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v19.0.5
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.5/changelog.md).
+
+The release includes 40 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @harshit-gangal, @systay, @vitess-bot
+
diff --git a/changelog/19.0/README.md b/changelog/19.0/README.md
index b5c6489f101..ae90ef2df1b 100644
--- a/changelog/19.0/README.md
+++ b/changelog/19.0/README.md
@@ -1,4 +1,8 @@
## v19.0
+* **[19.0.5](19.0.5)**
+ * [Changelog](19.0.5/changelog.md)
+ * [Release Notes](19.0.5/release_notes.md)
+
* **[19.0.4](19.0.4)**
* [Changelog](19.0.4/changelog.md)
* [Release Notes](19.0.4/release_notes.md)
diff --git a/docker/bootstrap/Dockerfile.mysql57 b/docker/bootstrap/Dockerfile.mysql57
index 4d79be9d3ec..d523241f499 100644
--- a/docker/bootstrap/Dockerfile.mysql57
+++ b/docker/bootstrap/Dockerfile.mysql57
@@ -14,6 +14,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections && \
+    percona-release enable-only tools && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server libmysqlclient-dev libdbd-mysql-perl rsync libev4 percona-xtrabackup-24 && \
rm -rf /var/lib/apt/lists/*
diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57
index febe09fd8bf..f43c655b3d7 100644
--- a/docker/bootstrap/Dockerfile.percona57
+++ b/docker/bootstrap/Dockerfile.percona57
@@ -11,6 +11,7 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.c
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections && \
+    percona-release enable-only tools && \
apt-get update && \
apt-get install -y --no-install-recommends percona-server-server-5.7 && \
apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \
diff --git a/examples/common/env.sh b/examples/common/env.sh
index 2416d0aa6c1..795e7005cad 100644
--- a/examples/common/env.sh
+++ b/examples/common/env.sh
@@ -82,6 +82,8 @@ mkdir -p "${VTDATAROOT}/tmp"
alias mysql="command mysql --no-defaults -h 127.0.0.1 -P 15306"
alias vtctldclient="command vtctldclient --server localhost:15999"
-# Make sure aliases are expanded in non-interactive shell
-shopt -s expand_aliases
+# If using bash, make sure aliases are expanded in non-interactive shell
+if [[ -n ${BASH} ]]; then
+ shopt -s expand_aliases
+fi
diff --git a/examples/common/scripts/vtadmin-up.sh b/examples/common/scripts/vtadmin-up.sh
index 292a71b99c5..356f6ac3880 100755
--- a/examples/common/scripts/vtadmin-up.sh
+++ b/examples/common/scripts/vtadmin-up.sh
@@ -14,6 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+function output() {
+ echo -e "$@"
+}
+
script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
source "${script_dir}/../env.sh"
@@ -24,11 +28,13 @@ web_dir="${script_dir}/../../../web/vtadmin"
vtadmin_api_port=14200
vtadmin_web_port=14201
-echo "vtadmin-api http-origin set to \"http://${hostname}:${vtadmin_web_port}\""
+case_insensitive_hostname=$(echo "$hostname" | tr '[:upper:]' '[:lower:]')
+
+output "\n\033[1;32mvtadmin-api expects vtadmin-web at \"http://${case_insensitive_hostname}:${vtadmin_web_port}\" and sets that as its http-origin\033[0m"
vtadmin \
- --addr "${hostname}:${vtadmin_api_port}" \
- --http-origin "http://${hostname}:${vtadmin_web_port}" \
+ --addr "${case_insensitive_hostname}:${vtadmin_api_port}" \
+ --http-origin "http://${case_insensitive_hostname}:${vtadmin_web_port}" \
--http-tablet-url-tmpl "http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \
--tracer "opentracing-jaeger" \
--grpc-tracing \
@@ -45,7 +51,7 @@ echo ${vtadmin_api_pid} > "${log_dir}/vtadmin-api.pid"
echo "\
vtadmin-api is running!
- - API: http://${hostname}:${vtadmin_api_port}
+ - API: http://${case_insensitive_hostname}:${vtadmin_api_port}
- Logs: ${log_dir}/vtadmin-api.out
- PID: ${vtadmin_api_pid}
"
@@ -56,7 +62,7 @@ source "${web_dir}/build.sh"
# Wait for vtadmin to successfully discover the cluster
expected_cluster_result="{\"result\":{\"clusters\":[{\"id\":\"${cluster_name}\",\"name\":\"${cluster_name}\"}]},\"ok\":true}"
for _ in {0..100}; do
- result=$(curl -s "http://${hostname}:${vtadmin_api_port}/api/clusters")
+ result=$(curl -s "http://${case_insensitive_hostname}:${vtadmin_api_port}/api/clusters")
if [[ ${result} == "${expected_cluster_result}" ]]; then
break
fi
@@ -64,7 +70,7 @@ for _ in {0..100}; do
done
# Check one last time
-[[ $(curl -s "http://${hostname}:${vtadmin_api_port}/api/clusters") == "${expected_cluster_result}" ]] || fail "vtadmin failed to discover the running example Vitess cluster."
+[[ $(curl -s "http://${case_insensitive_hostname}:${vtadmin_api_port}/api/clusters") == "${expected_cluster_result}" ]] || fail "vtadmin failed to discover the running example Vitess cluster."
[[ ! -d "$web_dir/build" ]] && fail "Please make sure the VTAdmin files are built in $web_dir/build, using 'make build'"
@@ -76,7 +82,7 @@ echo ${vtadmin_web_pid} > "${log_dir}/vtadmin-web.pid"
echo "\
vtadmin-web is running!
- - Browser: http://${hostname}:${vtadmin_web_port}
+ - Browser: http://${case_insensitive_hostname}:${vtadmin_web_port}
- Logs: ${log_dir}/vtadmin-web.out
- PID: ${vtadmin_web_pid}
"
diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml
index 028fe95a745..2e816d6a1c1 100644
--- a/examples/compose/docker-compose.beginners.yml
+++ b/examples/compose/docker-compose.beginners.yml
@@ -58,7 +58,7 @@ services:
- "3306"
vtctld:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15000:$WEB_PORT"
- "$GRPC_PORT"
@@ -81,7 +81,7 @@ services:
condition: service_healthy
vtgate:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15099:$WEB_PORT"
- "$GRPC_PORT"
@@ -111,7 +111,7 @@ services:
condition: service_healthy
schemaload:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
command:
- sh
- -c
@@ -144,12 +144,12 @@ services:
environment:
- KEYSPACES=$KEYSPACE
- GRPC_PORT=15999
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
vttablet100:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15100:$WEB_PORT"
- "$GRPC_PORT"
@@ -181,7 +181,7 @@ services:
retries: 15
vttablet101:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15101:$WEB_PORT"
- "$GRPC_PORT"
@@ -213,7 +213,7 @@ services:
retries: 15
vttablet102:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15102:$WEB_PORT"
- "$GRPC_PORT"
@@ -245,7 +245,7 @@ services:
retries: 15
vttablet103:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15103:$WEB_PORT"
- "$GRPC_PORT"
@@ -277,7 +277,7 @@ services:
retries: 15
vtorc:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
command: ["sh", "-c", "/script/vtorc-up.sh"]
depends_on:
- vtctld
@@ -307,7 +307,7 @@ services:
retries: 15
vreplication:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- ".:/script"
environment:
diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml
index 84c2c2ffca4..8626e6f3c85 100644
--- a/examples/compose/docker-compose.yml
+++ b/examples/compose/docker-compose.yml
@@ -75,7 +75,7 @@ services:
- SCHEMA_FILES=lookup_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
schemaload_test_keyspace:
@@ -101,7 +101,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
set_keyspace_durability_policy:
@@ -115,7 +115,7 @@ services:
environment:
- KEYSPACES=test_keyspace lookup_keyspace
- GRPC_PORT=15999
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
vreplication:
@@ -129,7 +129,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
vtctld:
@@ -143,7 +143,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15000:8080
- "15999"
@@ -160,7 +160,7 @@ services:
--normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15099:8080
- "15999"
@@ -182,7 +182,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 13000:8080
volumes:
@@ -217,7 +217,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15101:8080
- "15999"
@@ -254,7 +254,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15102:8080
- "15999"
@@ -291,7 +291,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15201:8080
- "15999"
@@ -328,7 +328,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15202:8080
- "15999"
@@ -365,7 +365,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15301:8080
- "15999"
@@ -402,7 +402,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15302:8080
- "15999"
diff --git a/examples/compose/vtcompose/docker-compose.test.yml b/examples/compose/vtcompose/docker-compose.test.yml
index f03a36bd344..f4abaad543c 100644
--- a/examples/compose/vtcompose/docker-compose.test.yml
+++ b/examples/compose/vtcompose/docker-compose.test.yml
@@ -79,7 +79,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
schemaload_unsharded_keyspace:
@@ -103,7 +103,7 @@ services:
- SCHEMA_FILES=unsharded_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
set_keyspace_durability_policy_test_keyspace:
@@ -117,7 +117,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=test_keyspace
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
set_keyspace_durability_policy_unsharded_keyspace:
@@ -130,7 +130,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=unsharded_keyspace
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
vreplication:
@@ -144,7 +144,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- .:/script
vtctld:
@@ -159,7 +159,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15000:8080
- "15999"
@@ -176,7 +176,7 @@ services:
''grpc-vtgateservice'' --normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15099:8080
- "15999"
@@ -199,7 +199,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 13000:8080
volumes:
@@ -234,7 +234,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15101:8080
- "15999"
@@ -271,7 +271,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15102:8080
- "15999"
@@ -308,7 +308,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15201:8080
- "15999"
@@ -345,7 +345,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15202:8080
- "15999"
@@ -382,7 +382,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- 15301:8080
- "15999"
diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go
index e5436c26282..25a1a19bce5 100644
--- a/examples/compose/vtcompose/vtcompose.go
+++ b/examples/compose/vtcompose/vtcompose.go
@@ -533,7 +533,7 @@ func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo,
- op: add
path: /services/init_shard_primary%[2]d
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
command: ["sh", "-c", "/vt/bin/vtctldclient %[5]s InitShardPrimary --force %[4]s/%[3]s %[6]s-%[2]d "]
%[1]s
`, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell)
@@ -565,7 +565,7 @@ func generateExternalPrimary(
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15%[1]d:%[3]d"
- "%[4]d"
@@ -627,7 +627,7 @@ func generateDefaultTablet(tabAlias int, shard, role, keyspace string, dbInfo ex
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15%[1]d:%[4]d"
- "%[5]d"
@@ -665,7 +665,7 @@ func generateVtctld(opts vtOptions) string {
- op: add
path: /services/vtctld
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15000:%[1]d"
- "%[2]d"
@@ -696,7 +696,7 @@ func generateVtgate(opts vtOptions) string {
- op: add
path: /services/vtgate
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
ports:
- "15099:%[1]d"
- "%[2]d"
@@ -738,7 +738,7 @@ func generateVTOrc(dbInfo externalDbInfo, keyspaceInfoMap map[string]keyspaceInf
- op: add
path: /services/vtorc
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- ".:/script"
environment:
@@ -763,7 +763,7 @@ func generateVreplication(dbInfo externalDbInfo, opts vtOptions) string {
- op: add
path: /services/vreplication
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- ".:/script"
environment:
@@ -791,7 +791,7 @@ func generateSetKeyspaceDurabilityPolicy(
- op: add
path: /services/set_keyspace_durability_policy_%[3]s
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- ".:/script"
environment:
@@ -828,7 +828,7 @@ func generateSchemaload(
- op: add
path: /services/schemaload_%[7]s
value:
- image: vitess/lite:v19.0.4
+ image: vitess/lite:v19.0.5
volumes:
- ".:/script"
environment:
diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml
index 0c4faa6644a..4c4d92f1f1f 100644
--- a/examples/operator/101_initial_cluster.yaml
+++ b/examples/operator/101_initial_cluster.yaml
@@ -8,14 +8,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.4
- vtadmin: vitess/vtadmin:v19.0.4
- vtgate: vitess/lite:v19.0.4
- vttablet: vitess/lite:v19.0.4
- vtbackup: vitess/lite:v19.0.4
- vtorc: vitess/lite:v19.0.4
+ vtctld: vitess/lite:v19.0.5
+ vtadmin: vitess/vtadmin:v19.0.5
+ vtgate: vitess/lite:v19.0.5
+ vttablet: vitess/lite:v19.0.5
+ vtbackup: vitess/lite:v19.0.5
+ vtorc: vitess/lite:v19.0.5
mysqld:
- mysql80Compatible: mysql:8.0.30
+ mysql80Compatible: vitess/lite:v19.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml
index b3f06d7efd5..d49cec49120 100644
--- a/examples/operator/201_customer_tablets.yaml
+++ b/examples/operator/201_customer_tablets.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.4
- vtadmin: vitess/vtadmin:v19.0.4
- vtgate: vitess/lite:v19.0.4
- vttablet: vitess/lite:v19.0.4
- vtbackup: vitess/lite:v19.0.4
- vtorc: vitess/lite:v19.0.4
+ vtctld: vitess/lite:v19.0.5
+ vtadmin: vitess/vtadmin:v19.0.5
+ vtgate: vitess/lite:v19.0.5
+ vttablet: vitess/lite:v19.0.5
+ vtbackup: vitess/lite:v19.0.5
+ vtorc: vitess/lite:v19.0.5
mysqld:
- mysql80Compatible: mysql:8.0.30
+ mysql80Compatible: vitess/lite:v19.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml
index 457ede80f74..5a0e8e141d1 100644
--- a/examples/operator/302_new_shards.yaml
+++ b/examples/operator/302_new_shards.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.4
- vtadmin: vitess/vtadmin:v19.0.4
- vtgate: vitess/lite:v19.0.4
- vttablet: vitess/lite:v19.0.4
- vtbackup: vitess/lite:v19.0.4
- vtorc: vitess/lite:v19.0.4
+ vtctld: vitess/lite:v19.0.5
+ vtadmin: vitess/vtadmin:v19.0.5
+ vtgate: vitess/lite:v19.0.5
+ vttablet: vitess/lite:v19.0.5
+ vtbackup: vitess/lite:v19.0.5
+ vtorc: vitess/lite:v19.0.5
mysqld:
- mysql80Compatible: mysql:8.0.30
+ mysql80Compatible: vitess/lite:v19.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml
index 7451128fdbe..1b28fe76bc6 100644
--- a/examples/operator/306_down_shard_0.yaml
+++ b/examples/operator/306_down_shard_0.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.4
- vtadmin: vitess/vtadmin:v19.0.4
- vtgate: vitess/lite:v19.0.4
- vttablet: vitess/lite:v19.0.4
- vtbackup: vitess/lite:v19.0.4
- vtorc: vitess/lite:v19.0.4
+ vtctld: vitess/lite:v19.0.5
+ vtadmin: vitess/vtadmin:v19.0.5
+ vtgate: vitess/lite:v19.0.5
+ vttablet: vitess/lite:v19.0.5
+ vtbackup: vitess/lite:v19.0.5
+ vtorc: vitess/lite:v19.0.5
mysqld:
- mysql80Compatible: mysql:8.0.30
+ mysql80Compatible: vitess/lite:v19.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/go/cmd/vtctldclient/command/routing_rules.go b/go/cmd/vtctldclient/command/routing_rules.go
index 0ffee0c2c24..8a228589098 100644
--- a/go/cmd/vtctldclient/command/routing_rules.go
+++ b/go/cmd/vtctldclient/command/routing_rules.go
@@ -82,7 +82,7 @@ func commandApplyRoutingRules(cmd *cobra.Command, args []string) error {
}
rr := &vschemapb.RoutingRules{}
- if err := json2.Unmarshal(rulesBytes, &rr); err != nil {
+ if err := json2.UnmarshalPB(rulesBytes, rr); err != nil {
return err
}
diff --git a/go/cmd/vtctldclient/command/shard_routing_rules.go b/go/cmd/vtctldclient/command/shard_routing_rules.go
index 10ce7e81747..2214269d0f3 100644
--- a/go/cmd/vtctldclient/command/shard_routing_rules.go
+++ b/go/cmd/vtctldclient/command/shard_routing_rules.go
@@ -87,7 +87,7 @@ func commandApplyShardRoutingRules(cmd *cobra.Command, args []string) error {
}
srr := &vschemapb.ShardRoutingRules{}
- if err := json2.Unmarshal(rulesBytes, &srr); err != nil {
+ if err := json2.UnmarshalPB(rulesBytes, srr); err != nil {
return err
}
// Round-trip so when we display the result it's readable.
diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
index bce6e18ddbb..a1e6d1ae57f 100644
--- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
+++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
@@ -462,7 +462,6 @@ func getStructFieldNames(s any) []string {
}
func buildListings(listings []*listing) string {
- var values []string
var lines [][]string
var result string
@@ -474,6 +473,7 @@ func buildListings(listings []*listing) string {
// The header is the first row.
lines = append(lines, fields)
for _, listing := range listings {
+ var values []string
v := reflect.ValueOf(*listing)
for _, field := range fields {
values = append(values, v.FieldByName(field).String())
diff --git a/go/cmd/vttestserver/cli/main_test.go b/go/cmd/vttestserver/cli/main_test.go
index dbaf256c806..7d3c5e84b23 100644
--- a/go/cmd/vttestserver/cli/main_test.go
+++ b/go/cmd/vttestserver/cli/main_test.go
@@ -60,7 +60,7 @@ func TestRunsVschemaMigrations(t *testing.T) {
cluster, err := startCluster()
defer cluster.TearDown()
- assert.NoError(t, err)
+ require.NoError(t, err)
assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"})
assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"})
@@ -77,7 +77,7 @@ func TestPersistentMode(t *testing.T) {
dir := t.TempDir()
cluster, err := startPersistentCluster(dir)
- assert.NoError(t, err)
+ require.NoError(t, err)
// Add a new "ad-hoc" vindex via vtgate once the cluster is up, to later make sure it is persisted across teardowns
err = addColumnVindex(cluster, "test_keyspace", "alter vschema on persistence_test add vindex my_vdx(id)")
@@ -116,7 +116,7 @@ func TestPersistentMode(t *testing.T) {
cluster.PersistentMode = false // Cleanup the tmpdir as we're done
cluster.TearDown()
}()
- assert.NoError(t, err)
+ require.NoError(t, err)
// rerun our sanity checks to make sure vschema is persisted correctly
assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"})
@@ -137,7 +137,7 @@ func TestForeignKeysAndDDLModes(t *testing.T) {
defer resetConfig(conf)
cluster, err := startCluster("--foreign_key_mode=allow", "--enable_online_ddl=true", "--enable_direct_ddl=true")
- assert.NoError(t, err)
+ require.NoError(t, err)
defer cluster.TearDown()
err = execOnCluster(cluster, "test_keyspace", func(conn *mysql.Conn) error {
@@ -163,7 +163,7 @@ func TestForeignKeysAndDDLModes(t *testing.T) {
cluster.TearDown()
cluster, err = startCluster("--foreign_key_mode=disallow", "--enable_online_ddl=false", "--enable_direct_ddl=false")
- assert.NoError(t, err)
+ require.NoError(t, err)
defer cluster.TearDown()
err = execOnCluster(cluster, "test_keyspace", func(conn *mysql.Conn) error {
@@ -191,7 +191,7 @@ func TestCanGetKeyspaces(t *testing.T) {
defer resetConfig(conf)
clusterInstance, err := startCluster()
- assert.NoError(t, err)
+ require.NoError(t, err)
defer clusterInstance.TearDown()
defer func() {
@@ -222,7 +222,7 @@ func TestExternalTopoServerConsul(t *testing.T) {
cluster, err := startCluster("--external_topo_implementation=consul",
fmt.Sprintf("--external_topo_global_server_address=%s", serverAddr), "--external_topo_global_root=consul_test/global")
- assert.NoError(t, err)
+ require.NoError(t, err)
defer cluster.TearDown()
assertGetKeyspaces(t, cluster)
@@ -258,7 +258,7 @@ func TestMtlsAuth(t *testing.T) {
fmt.Sprintf("--vtctld_grpc_cert=%s", clientCert),
fmt.Sprintf("--vtctld_grpc_ca=%s", caCert),
fmt.Sprintf("--grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp"))
- assert.NoError(t, err)
+ require.NoError(t, err)
defer func() {
cluster.PersistentMode = false // Cleanup the tmpdir as we're done
cluster.TearDown()
@@ -302,7 +302,7 @@ func TestMtlsAuthUnauthorizedFails(t *testing.T) {
fmt.Sprintf("--grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp"))
defer cluster.TearDown()
- assert.Error(t, err)
+ require.Error(t, err)
assert.Contains(t, err.Error(), "code = Unauthenticated desc = client certificate not authorized")
}
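
A minimal sketch (with a hypothetical `startFakeCluster` helper) of why these setup checks moved from `assert.NoError` to `require.NoError`: `assert` records the failure and keeps executing, so the very next use of the half-initialized cluster panics, while `require` stops the test at the real cause.

```go
package cli

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// fakeCluster and startFakeCluster are hypothetical stand-ins for the
// vttest cluster helpers used in the tests above.
type fakeCluster struct{}

func (c *fakeCluster) TearDown() {}

func startFakeCluster() (*fakeCluster, error) {
	return nil, errors.New("port already in use")
}

func TestStartupErrorHandling(t *testing.T) {
	cluster, err := startFakeCluster()
	// require.NoError aborts the test here with the startup error;
	// assert.NoError would continue and panic on the nil cluster below.
	require.NoError(t, err)
	defer cluster.TearDown()
}
```
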
diff --git a/go/json2/unmarshal.go b/go/json2/unmarshal.go
index e382b8ad47a..e2034fa71c9 100644
--- a/go/json2/unmarshal.go
+++ b/go/json2/unmarshal.go
@@ -33,8 +33,7 @@ var carriageReturn = []byte("\n")
// efficient and should not be used for high QPS operations.
func Unmarshal(data []byte, v any) error {
if pb, ok := v.(proto.Message); ok {
- opts := protojson.UnmarshalOptions{DiscardUnknown: true}
- return annotate(data, opts.Unmarshal(data, pb))
+ return UnmarshalPB(data, pb)
}
return annotate(data, json.Unmarshal(data, v))
}
@@ -53,3 +52,9 @@ func annotate(data []byte, err error) error {
return fmt.Errorf("line: %d, position %d: %v", line, pos, err)
}
+
+// UnmarshalPB is similar to Unmarshal but specifically for proto.Message to add type safety.
+func UnmarshalPB(data []byte, pb proto.Message) error {
+ opts := protojson.UnmarshalOptions{DiscardUnknown: true}
+ return annotate(data, opts.Unmarshal(data, pb))
+}
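
A minimal sketch (using `structpb.Struct` as a stand-in message) of the failure mode that motivates `UnmarshalPB`: passing a double pointer such as `&rr` to the `any`-typed `Unmarshal` misses the `proto.Message` type assertion and silently falls back to `encoding/json`, which is exactly what the call-site changes in `routing_rules.go` and `shard_routing_rules.go` above avoid.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

// dispatch mirrors json2.Unmarshal's type switch. *structpb.Struct implements
// proto.Message, but **structpb.Struct does not, so a double pointer silently
// skips the protojson path.
func dispatch(v any) string {
	if _, ok := v.(proto.Message); ok {
		return "protojson"
	}
	return "encoding/json"
}

func main() {
	s := &structpb.Struct{}
	fmt.Println(dispatch(&s)) // "encoding/json" -- the old bug
	fmt.Println(dispatch(s))  // "protojson"     -- the intended path
	// UnmarshalPB(data, &s) would be a compile error, which is the
	// type safety the new helper adds.
}
```
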
diff --git a/go/json2/unmarshal_test.go b/go/json2/unmarshal_test.go
index ff18a29def8..1ba3368d5ca 100644
--- a/go/json2/unmarshal_test.go
+++ b/go/json2/unmarshal_test.go
@@ -91,3 +91,14 @@ func TestAnnotate(t *testing.T) {
require.Equal(t, tcase.err, err, "annotate(%s, %v) error", string(tcase.data), tcase.err)
}
}
+
+func TestUnmarshalPB(t *testing.T) {
+ want := &emptypb.Empty{}
+ json, err := protojson.Marshal(want)
+ require.NoError(t, err)
+
+ var got emptypb.Empty
+ err = UnmarshalPB(json, &got)
+ require.NoError(t, err)
+ require.Equal(t, want, &got)
+}
diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go
index f413c8ef1fb..f83f087582c 100644
--- a/go/mysql/flavor_mysql.go
+++ b/go/mysql/flavor_mysql.go
@@ -286,9 +286,18 @@ func (mysqlFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) {
// baseShowTables is part of the Flavor interface.
func (mysqlFlavor) baseShowTables() string {
- return "SELECT table_name, table_type, unix_timestamp(create_time), table_comment FROM information_schema.tables WHERE table_schema = database()"
+ return BaseShowTables
}
+const BaseShowTables = `SELECT t.table_name,
+ t.table_type,
+ UNIX_TIMESTAMP(t.create_time),
+ t.table_comment
+ FROM information_schema.tables t
+ WHERE
+ t.table_schema = database()
+`
+
// TablesWithSize56 is a query to select table along with size for mysql 5.6
const TablesWithSize56 = `SELECT table_name,
table_type,
diff --git a/go/mysql/schema.go b/go/mysql/schema.go
index d0b9bfe2e79..03d558d2637 100644
--- a/go/mysql/schema.go
+++ b/go/mysql/schema.go
@@ -78,19 +78,21 @@ var BaseShowTablesFields = []*querypb.Field{{
ColumnLength: 6144,
Charset: uint32(collations.SystemCollation.Collation),
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG),
-}, {
+}}
+
+var BaseShowTablesWithSizesFields = append(BaseShowTablesFields, &querypb.Field{
Name: "i.file_size",
Type: querypb.Type_INT64,
ColumnLength: 11,
Charset: collations.CollationBinaryID,
Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG),
-}, {
+}, &querypb.Field{
Name: "i.allocated_size",
Type: querypb.Type_INT64,
ColumnLength: 11,
Charset: collations.CollationBinaryID,
Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG),
-}}
+})
// BaseShowTablesRow returns the fields from a BaseShowTables or
// BaseShowTablesForTable command.
@@ -104,9 +106,14 @@ func BaseShowTablesRow(tableName string, isView bool, comment string) []sqltypes
sqltypes.MakeTrusted(sqltypes.VarChar, []byte(tableType)),
sqltypes.MakeTrusted(sqltypes.Int64, []byte("1427325875")), // unix_timestamp(create_time)
sqltypes.MakeTrusted(sqltypes.VarChar, []byte(comment)),
+ }
+}
+
+func BaseShowTablesWithSizesRow(tableName string, isView bool, comment string) []sqltypes.Value {
+ return append(BaseShowTablesRow(tableName, isView, comment),
sqltypes.MakeTrusted(sqltypes.Int64, []byte("100")), // file_size
sqltypes.MakeTrusted(sqltypes.Int64, []byte("150")), // allocated_size
- }
+ )
}
// ShowPrimaryFields contains the fields for a BaseShowPrimary.
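
A sketch of how the split pairs up in a test fake, assuming the `fakesqldb` helper and its `AddQuery` method: the plain listing query now carries only the four base fields, while the `WithSizes` variants append the two size columns.

```go
package example

import (
	"vitess.io/vitess/go/mysql"
	"vitess.io/vitess/go/mysql/fakesqldb"
	"vitess.io/vitess/go/sqltypes"
)

// registerShowTables pairs each query shape with its matching field set:
// BaseShowTables returns only the base columns, while TablesWithSize80
// adds file_size and allocated_size.
func registerShowTables(db *fakesqldb.DB) {
	db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{
		Fields: mysql.BaseShowTablesFields,
		Rows:   [][]sqltypes.Value{mysql.BaseShowTablesRow("t1", false, "")},
	})
	db.AddQuery(mysql.TablesWithSize80, &sqltypes.Result{
		Fields: mysql.BaseShowTablesWithSizesFields,
		Rows:   [][]sqltypes.Value{mysql.BaseShowTablesWithSizesRow("t1", false, "")},
	})
}
```
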
diff --git a/go/pools/smartconnpool/pool.go b/go/pools/smartconnpool/pool.go
index 7c10d6ba4b0..ecc3f827c7b 100644
--- a/go/pools/smartconnpool/pool.go
+++ b/go/pools/smartconnpool/pool.go
@@ -32,10 +32,16 @@ import (
var (
// ErrTimeout is returned if a connection get times out.
- ErrTimeout = vterrors.New(vtrpcpb.Code_RESOURCE_EXHAUSTED, "resource pool timed out")
+ ErrTimeout = vterrors.New(vtrpcpb.Code_RESOURCE_EXHAUSTED, "connection pool timed out")
// ErrCtxTimeout is returned if a ctx is already expired by the time the connection pool is used
- ErrCtxTimeout = vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "resource pool context already expired")
+ ErrCtxTimeout = vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "connection pool context already expired")
+
+ // ErrConnPoolClosed is returned when trying to get a connection from a closed conn pool
+ ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_INTERNAL, "connection pool is closed")
+
+ // PoolCloseTimeout is how long to wait for all connections to be returned to the pool during close
+ PoolCloseTimeout = 10 * time.Second
)
type Metrics struct {
@@ -119,8 +125,9 @@ type ConnPool[C Connection] struct {
capacity atomic.Int64
// workers is a waitgroup for all the currently running worker goroutines
- workers sync.WaitGroup
- close chan struct{}
+ workers sync.WaitGroup
+ close chan struct{}
+ capacityMu sync.Mutex
config struct {
// connect is the callback to create a new connection for the pool
@@ -142,6 +149,7 @@ type ConnPool[C Connection] struct {
}
Metrics Metrics
+ Name string
}
// NewPool creates a new connection pool with the given Config.
@@ -236,29 +244,60 @@ func (pool *ConnPool[C]) Open(connect Connector[C], refresh RefreshCheck) *ConnP
// Close shuts down the pool. No connections will be returned from ConnPool.Get after calling this,
// but calling ConnPool.Put is still allowed. This function will not return until all of the pool's
-// connections have been returned.
+// connections have been returned or the default PoolCloseTimeout has elapsed
func (pool *ConnPool[C]) Close() {
- if pool.close == nil {
+ ctx, cancel := context.WithTimeout(context.Background(), PoolCloseTimeout)
+ defer cancel()
+
+ if err := pool.CloseWithContext(ctx); err != nil {
+ log.Errorf("failed to close pool %q: %v", pool.Name, err)
+ }
+}
+
+// CloseWithContext behaves like Close but allows passing in a Context to time out the
+// pool closing operation
+func (pool *ConnPool[C]) CloseWithContext(ctx context.Context) error {
+ pool.capacityMu.Lock()
+ defer pool.capacityMu.Unlock()
+
+ if pool.close == nil || pool.capacity.Load() == 0 {
// already closed
- return
+ return nil
}
- pool.SetCapacity(0)
+ // close all the connections in the pool; if we time out while waiting for
+ // users to return our connections, we still want to finish the shutdown
+ // for the pool
+ err := pool.setCapacity(ctx, 0)
close(pool.close)
pool.workers.Wait()
pool.close = nil
+ return err
}
func (pool *ConnPool[C]) reopen() {
+ pool.capacityMu.Lock()
+ defer pool.capacityMu.Unlock()
+
capacity := pool.capacity.Load()
if capacity == 0 {
return
}
- pool.Close()
- pool.open()
- pool.SetCapacity(capacity)
+ ctx, cancel := context.WithTimeout(context.Background(), PoolCloseTimeout)
+ defer cancel()
+
+ // to re-open the connection pool, first set the capacity to 0 so we close
+ // all the existing connections, as they're now connected to a stale MySQL
+ // instance.
+ if err := pool.setCapacity(ctx, 0); err != nil {
+ log.Errorf("failed to reopen pool %q: %v", pool.Name, err)
+ }
+
+ // the second call to setCapacity cannot fail because it's only increasing the number
+ // of connections and doesn't need to shut down any
+ _ = pool.setCapacity(ctx, capacity)
}
// IsOpen returns whether the pool is open
@@ -322,7 +361,7 @@ func (pool *ConnPool[C]) Get(ctx context.Context, setting *Setting) (*Pooled[C],
return nil, ErrCtxTimeout
}
if pool.capacity.Load() == 0 {
- return nil, ErrTimeout
+ return nil, ErrConnPoolClosed
}
if setting == nil {
return pool.get(ctx)
@@ -572,39 +611,55 @@ func (pool *ConnPool[C]) getWithSetting(ctx context.Context, setting *Setting) (
// If the capacity is smaller than the number of connections that there are
// currently open, we'll close enough connections before returning, even if
// that means waiting for clients to return connections to the pool.
-func (pool *ConnPool[C]) SetCapacity(newcap int64) {
+// If the given context times out before we've managed to close enough connections
+// an error will be returned.
+func (pool *ConnPool[C]) SetCapacity(ctx context.Context, newcap int64) error {
+ pool.capacityMu.Lock()
+ defer pool.capacityMu.Unlock()
+ return pool.setCapacity(ctx, newcap)
+}
+
+// setCapacity is the internal implementation for SetCapacity; it must be called
+// with pool.capacityMu being held
+func (pool *ConnPool[C]) setCapacity(ctx context.Context, newcap int64) error {
if newcap < 0 {
panic("negative capacity")
}
oldcap := pool.capacity.Swap(newcap)
if oldcap == newcap {
- return
+ return nil
}
- backoff := 1 * time.Millisecond
+ const delay = 10 * time.Millisecond
// close connections until we're under capacity
for pool.active.Load() > newcap {
+ if err := ctx.Err(); err != nil {
+ return vterrors.Errorf(vtrpcpb.Code_ABORTED,
+ "timed out while waiting for connections to be returned to the pool (capacity=%d, active=%d, borrowed=%d)",
+ pool.capacity.Load(), pool.active.Load(), pool.borrowed.Load())
+ }
+		// if we're closing down the pool, make sure there are no clients waiting
+ // for connections because they won't be returned in the future
+ if newcap == 0 {
+ pool.wait.expire(true)
+ }
+
// try closing from connections which are currently idle in the stacks
conn := pool.getFromSettingsStack(nil)
if conn == nil {
conn, _ = pool.clean.Pop()
}
if conn == nil {
- time.Sleep(backoff)
- backoff += 1 * time.Millisecond
+ time.Sleep(delay)
continue
}
conn.Close()
pool.closedConn()
}
- // if we're closing down the pool, wake up any blocked waiters because no connections
- // are going to be returned in the future
- if newcap == 0 {
- pool.wait.expire(true)
- }
+ return nil
}
func (pool *ConnPool[C]) closeIdleResources(now time.Time) {
@@ -660,6 +715,8 @@ func (pool *ConnPool[C]) RegisterStats(stats *servenv.Exporter, name string) {
return
}
+ pool.Name = name
+
stats.NewGaugeFunc(name+"Capacity", "Tablet server conn pool capacity", func() int64 {
return pool.Capacity()
})
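
A minimal sketch of the new shutdown contract: callers that cannot afford to block forever can bound the drain with their own context, and on timeout the pool still completes its shutdown while the returned error reports what was left outstanding.

```go
package example

import (
	"context"
	"time"

	"vitess.io/vitess/go/pools/smartconnpool"
)

// drainPool bounds how long we wait for borrowed connections to be returned.
// On timeout, CloseWithContext still finishes shutting the pool down and the
// returned error includes the capacity/active/borrowed counters.
func drainPool[C smartconnpool.Connection](pool *smartconnpool.ConnPool[C]) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return pool.CloseWithContext(ctx)
}
```
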
diff --git a/go/pools/smartconnpool/pool_test.go b/go/pools/smartconnpool/pool_test.go
index 9a9fb9500b6..701327005ad 100644
--- a/go/pools/smartconnpool/pool_test.go
+++ b/go/pools/smartconnpool/pool_test.go
@@ -208,13 +208,15 @@ func TestOpen(t *testing.T) {
assert.EqualValues(t, 6, state.lastID.Load())
// SetCapacity
- p.SetCapacity(3)
+ err = p.SetCapacity(ctx, 3)
+ require.NoError(t, err)
assert.EqualValues(t, 3, state.open.Load())
assert.EqualValues(t, 6, state.lastID.Load())
assert.EqualValues(t, 3, p.Capacity())
assert.EqualValues(t, 3, p.Available())
- p.SetCapacity(6)
+ err = p.SetCapacity(ctx, 6)
+ require.NoError(t, err)
assert.EqualValues(t, 6, p.Capacity())
assert.EqualValues(t, 6, p.Available())
@@ -265,7 +267,9 @@ func TestShrinking(t *testing.T) {
}
done := make(chan bool)
go func() {
- p.SetCapacity(3)
+ err := p.SetCapacity(ctx, 3)
+ require.NoError(t, err)
+
done <- true
}()
expected := map[string]any{
@@ -335,7 +339,8 @@ func TestShrinking(t *testing.T) {
// This will also wait
go func() {
- p.SetCapacity(2)
+ err := p.SetCapacity(ctx, 2)
+ require.NoError(t, err)
done <- true
}()
time.Sleep(10 * time.Millisecond)
@@ -353,7 +358,8 @@ func TestShrinking(t *testing.T) {
assert.EqualValues(t, 2, state.open.Load())
// Test race condition of SetCapacity with itself
- p.SetCapacity(3)
+ err = p.SetCapacity(ctx, 3)
+ require.NoError(t, err)
for i := 0; i < 3; i++ {
var r *Pooled[*TestConn]
var err error
@@ -375,9 +381,15 @@ func TestShrinking(t *testing.T) {
time.Sleep(10 * time.Millisecond)
// This will wait till we Put
- go p.SetCapacity(2)
+ go func() {
+ err := p.SetCapacity(ctx, 2)
+ require.NoError(t, err)
+ }()
time.Sleep(10 * time.Millisecond)
- go p.SetCapacity(4)
+ go func() {
+ err := p.SetCapacity(ctx, 4)
+ require.NoError(t, err)
+ }()
time.Sleep(10 * time.Millisecond)
// This should not hang
@@ -387,7 +399,7 @@ func TestShrinking(t *testing.T) {
<-done
assert.Panics(t, func() {
- p.SetCapacity(-1)
+ _ = p.SetCapacity(ctx, -1)
})
assert.EqualValues(t, 4, p.Capacity())
@@ -530,6 +542,46 @@ func TestReopen(t *testing.T) {
assert.EqualValues(t, 0, state.open.Load())
}
+func TestUserClosing(t *testing.T) {
+ var state TestState
+
+ ctx := context.Background()
+ p := NewPool(&Config[*TestConn]{
+ Capacity: 5,
+ IdleTimeout: time.Second,
+ LogWait: state.LogWait,
+ }).Open(newConnector(&state), nil)
+
+ var resources [5]*Pooled[*TestConn]
+ for i := 0; i < 5; i++ {
+ var err error
+ resources[i], err = p.Get(ctx, nil)
+ require.NoError(t, err)
+ }
+
+ for _, r := range resources[:4] {
+ r.Recycle()
+ }
+
+ ch := make(chan error)
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ err := p.CloseWithContext(ctx)
+ ch <- err
+ close(ch)
+ }()
+
+ select {
+ case <-time.After(5 * time.Second):
+ t.Fatalf("Pool did not shutdown after 5s")
+ case err := <-ch:
+ require.Error(t, err)
+ t.Logf("Shutdown error: %v", err)
+ }
+}
+
func TestIdleTimeout(t *testing.T) {
testTimeout := func(t *testing.T, setting *Setting) {
var state TestState
@@ -818,7 +870,7 @@ func TestTimeout(t *testing.T) {
newctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
_, err = p.Get(newctx, setting)
cancel()
- assert.EqualError(t, err, "resource pool timed out")
+ assert.EqualError(t, err, "connection pool timed out")
}
@@ -842,7 +894,7 @@ func TestExpired(t *testing.T) {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1*time.Second))
_, err := p.Get(ctx, setting)
cancel()
- require.EqualError(t, err, "resource pool context already expired")
+ require.EqualError(t, err, "connection pool context already expired")
}
}
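
TestUserClosing above pins down the new shutdown behavior: with one connection
still checked out, closing keeps draining the pool until the context expires
and then returns an error instead of hanging. A hedged sketch of the matching
caller-side pattern (pool and timeout are illustrative):

	// Give clients one second to return their connections; after that,
	// CloseWithContext fails with an ABORTED error rather than blocking
	// forever.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := pool.CloseWithContext(ctx); err != nil {
		log.Warningf("pool did not shut down cleanly: %v", err)
	}
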
diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go
index 1e770b87516..6b1eb679ffa 100644
--- a/go/test/endtoend/utils/mysql.go
+++ b/go/test/endtoend/utils/mysql.go
@@ -237,8 +237,8 @@ func checkFields(t *testing.T, columnName string, vtField, myField *querypb.Fiel
t.Errorf("for column %s field types do not match\nNot equal: \nMySQL: %v\nVitess: %v\n", columnName, myField.Type.String(), vtField.Type.String())
}
- // starting in Vitess 20, decimal types are properly sized in their field information
- if BinaryIsAtLeastAtVersion(20, "vtgate") && vtField.Type == sqltypes.Decimal {
+ // starting in Vitess 19, decimal types are properly sized in their field information
+ if BinaryIsAtLeastAtVersion(19, "vtgate") && vtField.Type == sqltypes.Decimal {
if vtField.Decimals != myField.Decimals {
t.Errorf("for column %s field decimals count do not match\nNot equal: \nMySQL: %v\nVitess: %v\n", columnName, myField.Decimals, vtField.Decimals)
}
diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
index 4255538a07d..83840a78516 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
@@ -18,6 +18,7 @@ package aggregation
import (
"fmt"
+ "math/rand/v2"
"slices"
"sort"
"strings"
@@ -69,6 +70,22 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
}
}
+func TestAggrWithLimit(t *testing.T) {
+ version, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
+ if version != 19 {
+ t.Skip("Test requires VTGate version 18")
+ }
+ mcmp, closer := start(t)
+ defer closer()
+
+ for i := range 1000 {
+ r := rand.IntN(50)
+ mcmp.Exec(fmt.Sprintf("insert into aggr_test(id, val1, val2) values(%d, 'a', %d)", i, r))
+ }
+ mcmp.Exec("select val2, count(*) from aggr_test group by val2 order by count(*), val2 limit 10")
+}
+
func TestAggregateTypes(t *testing.T) {
mcmp, closer := start(t)
defer closer()
@@ -415,8 +432,15 @@ func TestOrderByCount(t *testing.T) {
defer closer()
mcmp.Exec("insert into t9(id1, id2, id3) values(1, '1', '1'), (2, '2', '2'), (3, '2', '2'), (4, '3', '3'), (5, '3', '3'), (6, '3', '3')")
+ mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'b1','bar',200)")
- mcmp.AssertMatches("SELECT t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC", `[[VARCHAR("3")] [VARCHAR("2")] [VARCHAR("1")]]`)
+ mcmp.Exec("SELECT t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC")
+ version, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
+ if version == 19 {
+ mcmp.Exec("select COUNT(*) from (select 1 as one FROM t9 WHERE id3 = 3 ORDER BY id1 DESC LIMIT 3 OFFSET 0) subquery_for_count")
+ mcmp.Exec("select t.id1, t1.name, t.leCount from (select id1, count(*) as leCount from t9 group by 1 order by 2 desc limit 20) t join t1 on t.id1 = t1.t1_id")
+ }
}
func TestAggregateAnyValue(t *testing.T) {
@@ -573,7 +597,7 @@ func TestComplexAggregation(t *testing.T) {
func TestJoinAggregation(t *testing.T) {
- // This is new functionality in Vitess 20
+ // This functionality was backported to Vitess 19
- utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go
index 80ae36633e1..6eb7ee914cd 100644
--- a/go/test/endtoend/vtgate/queries/derived/derived_test.go
+++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go
@@ -113,3 +113,15 @@ func TestDerivedTablesWithLimit(t *testing.T) {
(SELECT id, user_id FROM music LIMIT 10) as m on u.id = m.user_id`,
`[[INT64(1) INT64(1)] [INT64(5) INT64(2)] [INT64(1) INT64(3)] [INT64(2) INT64(4)] [INT64(3) INT64(5)] [INT64(5) INT64(7)] [INT64(4) INT64(6)] [INT64(6) NULL]]`)
}
+
+// TestDerivedTableColumnAliasWithJoin tests a derived table with an aliased column that is used in the join condition
+func TestDerivedTableColumnAliasWithJoin(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec(`SELECT user.id FROM user join (SELECT id as uid FROM user) t on t.uid = user.id`)
+ mcmp.Exec(`SELECT user.id FROM user left join (SELECT id as uid FROM user) t on t.uid = user.id`)
+ mcmp.Exec(`SELECT user.id FROM user join (SELECT id FROM user) t(uid) on t.uid = user.id`)
+ mcmp.Exec(`SELECT user.id FROM user left join (SELECT id FROM user) t(uid) on t.uid = user.id`)
+}
diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go
index ce052b7b2ba..dfb5961d887 100644
--- a/go/test/endtoend/vtgate/queries/dml/insert_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go
@@ -54,6 +54,27 @@ func TestSimpleInsertSelect(t *testing.T) {
utils.AssertMatches(t, mcmp.VtConn, `select num from num_vdx_tbl order by num`, `[[INT64(2)] [INT64(4)] [INT64(40)] [INT64(42)] [INT64(80)] [INT64(84)]]`)
}
+// TestInsertOnDup tests the insert on duplicate key update feature with single and list bind arguments.
+func TestInsertOnDup(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into order_tbl(oid, region_id, cust_no) values (1,2,3),(3,4,5)")
+
+ for _, mode := range []string{"oltp", "olap"} {
+ mcmp.Run(mode, func(mcmp *utils.MySQLCompare) {
+ utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", mode))
+
+ mcmp.Exec(`insert into order_tbl(oid, region_id, cust_no) values (2,2,3),(4,4,5) on duplicate key update cust_no = if(values(cust_no) in (1, 2, 3), region_id, values(cust_no))`)
+ mcmp.Exec(`select oid, region_id, cust_no from order_tbl order by oid, region_id`)
+ mcmp.Exec(`insert into order_tbl(oid, region_id, cust_no) values (7,2,2) on duplicate key update cust_no = 10 + values(cust_no)`)
+ mcmp.Exec(`select oid, region_id, cust_no from order_tbl order by oid, region_id`)
+ })
+ }
+}
+
func TestFailureInsertSelect(t *testing.T) {
if clusterInstance.HasPartialKeyspaces {
t.Skip("don't run on partial keyspaces")
diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go
index ed2221eaf7d..2d861b1a625 100644
--- a/go/test/endtoend/vtgate/queries/misc/misc_test.go
+++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go
@@ -114,6 +114,21 @@ func TestInvalidDateTimeTimestampVals(t *testing.T) {
require.Error(t, err)
}
+func TestJoinWithThreeTables(t *testing.T) {
+ version, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
+ if version != 19 {
+ t.Skip("cannot run upgrade/downgrade test")
+ }
+
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(id1, id2) values (0,0), (1,1), (2,2)")
+ mcmp.Exec("insert into tbl(id, unq_col, nonunq_col) values (0,0,0), (1,1,1), (2,2,1)")
+ mcmp.Exec("select 42 from t1 u1, t1 u2, tbl u3 where u1.id1 = u2.id1 and u1.id1 = u3.id and (u1.id2 or u2.id2 or u3.unq_col)")
+}
+
// TestIntervalWithMathFunctions tests that the Interval keyword can be used with math functions.
func TestIntervalWithMathFunctions(t *testing.T) {
mcmp, closer := start(t)
@@ -371,3 +386,17 @@ func TestAlterTableWithView(t *testing.T) {
mcmp.AssertMatches("select * from v1", `[[INT64(1) INT64(1)]]`)
}
+
+func TestHandleNullableColumn(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 21, "vtgate")
+ require.NoError(t,
+ utils.WaitForAuthoritative(t, keyspaceName, "tbl", clusterInstance.VtgateProcess.ReadVSchema))
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(id1, id2) values (0,0), (1,1), (2,2)")
+ mcmp.Exec("insert into tbl(id, unq_col, nonunq_col) values (0,0,0), (1,1,6)")
+ // This query tests that we handle nullable columns correctly
+ // tbl.nonunq_col is not nullable according to the schema, but because of the left join, it can be NULL
+ mcmp.ExecWithColumnCompare(`select * from t1 left join tbl on t1.id2 = tbl.id where t1.id1 = 6 or tbl.nonunq_col = 6`)
+}
diff --git a/go/test/endtoend/vtgate/queries/misc/schema.sql b/go/test/endtoend/vtgate/queries/misc/schema.sql
index ceac0c07e6d..f87d7c19078 100644
--- a/go/test/endtoend/vtgate/queries/misc/schema.sql
+++ b/go/test/endtoend/vtgate/queries/misc/schema.sql
@@ -1,5 +1,15 @@
-create table if not exists t1(
- id1 bigint,
- id2 bigint,
- primary key(id1)
-) Engine=InnoDB;
\ No newline at end of file
+create table t1
+(
+ id1 bigint,
+ id2 bigint,
+ primary key (id1)
+) Engine=InnoDB;
+
+create table tbl
+(
+ id bigint,
+ unq_col bigint,
+ nonunq_col bigint,
+ primary key (id),
+ unique (unq_col)
+) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/queries/misc/vschema.json b/go/test/endtoend/vtgate/queries/misc/vschema.json
index 60aa2bc9c07..63e870ffd58 100644
--- a/go/test/endtoend/vtgate/queries/misc/vschema.json
+++ b/go/test/endtoend/vtgate/queries/misc/vschema.json
@@ -13,6 +13,14 @@
"name": "hash"
}
]
+ },
+ "tbl": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
}
}
}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go
index 0e3096e6064..ae7319a52e3 100644
--- a/go/test/endtoend/vtgate/queries/reference/reference_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go
@@ -84,7 +84,7 @@ func TestReferenceRouting(t *testing.T) {
)
t.Run("Complex reference query", func(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// Verify a complex query using reference tables with a left join having a derived table with an order by clause works as intended.
utils.AssertMatches(
t,
diff --git a/go/test/endtoend/vtgate/queries/subquery/schema.sql b/go/test/endtoend/vtgate/queries/subquery/schema.sql
index a64ac799a4e..9dfa963d340 100644
--- a/go/test/endtoend/vtgate/queries/subquery/schema.sql
+++ b/go/test/endtoend/vtgate/queries/subquery/schema.sql
@@ -4,18 +4,21 @@ create table t1
id2 bigint,
primary key (id1)
) Engine = InnoDB;
+
create table t1_id2_idx
(
id2 bigint,
keyspace_id varbinary(10),
primary key (id2)
) Engine = InnoDB;
+
create table t2
(
id3 bigint,
id4 bigint,
primary key (id3)
) Engine = InnoDB;
+
create table t2_id4_idx
(
id bigint not null auto_increment,
@@ -23,4 +26,17 @@ create table t2_id4_idx
id3 bigint,
primary key (id),
key idx_id4 (id4)
-) Engine = InnoDB;
\ No newline at end of file
+) Engine = InnoDB;
+
+CREATE TABLE user
+(
+ id INT PRIMARY KEY,
+ name VARCHAR(100)
+);
+
+CREATE TABLE user_extra
+(
+ user_id INT,
+ extra_info VARCHAR(100),
+ PRIMARY KEY (user_id, extra_info)
+);
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
index 59dc42de060..e3f3cc52a5b 100644
--- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package subquery
import (
+ "fmt"
"testing"
"github.com/stretchr/testify/assert"
@@ -179,7 +180,7 @@ func TestSubqueryInAggregation(t *testing.T) {
// TestSubqueryInDerivedTable tests that subqueries and derived tables
// are handled correctly when there are joins inside the derived table
func TestSubqueryInDerivedTable(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -188,3 +189,46 @@ func TestSubqueryInDerivedTable(t *testing.T) {
mcmp.Exec(`select t.a from (select t1.id2, t2.id3, (select id2 from t1 order by id2 limit 1) as a from t1 join t2 on t1.id1 = t2.id4) t`)
mcmp.Exec(`SELECT COUNT(*) FROM (SELECT DISTINCT t1.id1 FROM t1 JOIN t2 ON t1.id1 = t2.id4) dt`)
}
+
+func TestSubqueries(t *testing.T) {
+ // This method tests many types of subqueries. The queries should move to a vitess-tester test file once we have a way to run them.
+ // The commented-out queries currently fail because the wrong types are returned;
+ // they will stay commented out until the issue is fixed.
+ utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+ mcmp, closer := start(t)
+ defer closer()
+ queries := []string{
+ `INSERT INTO user (id, name) VALUES (1, 'Alice'), (2, 'Bob'), (3, 'Charlie'), (4, 'David'), (5, 'Eve'), (6, 'Frank'), (7, 'Grace'), (8, 'Hannah'), (9, 'Ivy'), (10, 'Jack')`,
+ `INSERT INTO user_extra (user_id, extra_info) VALUES (1, 'info1'), (1, 'info2'), (2, 'info1'), (3, 'info1'), (3, 'info2'), (4, 'info1'), (5, 'info1'), (6, 'info1'), (7, 'info1'), (8, 'info1')`,
+ `SELECT (SELECT COUNT(*) FROM user_extra) AS order_count, id FROM user WHERE id = (SELECT COUNT(*) FROM user_extra)`,
+ `SELECT id, (SELECT COUNT(*) FROM user_extra) AS order_count FROM user ORDER BY (SELECT COUNT(*) FROM user_extra)`,
+ `SELECT id FROM user WHERE id = (SELECT COUNT(*) FROM user_extra) ORDER BY (SELECT COUNT(*) FROM user_extra)`,
+ `SELECT (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) AS extra_count, id, name FROM user WHERE (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) > 0`,
+ `SELECT id, name, (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) AS extra_count FROM user ORDER BY (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id)`,
+ `SELECT id, name FROM user WHERE (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) > 0 ORDER BY (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id)`,
+ `SELECT id, name, (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) AS extra_count FROM user GROUP BY id, name HAVING COUNT(*) > (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id)`,
+ `SELECT id, name, COUNT(*) FROM user WHERE (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) > 0 GROUP BY id, name HAVING COUNT(*) > (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id)`,
+ `SELECT id, round(MAX(id + (SELECT COUNT(*) FROM user_extra where user_id = 42))) as r FROM user WHERE id = 42 GROUP BY id ORDER BY r`,
+ `SELECT id, name, (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) * 2 AS double_extra_count FROM user`,
+ `SELECT id, name FROM user WHERE id IN (SELECT user_id FROM user_extra WHERE LENGTH(extra_info) > 4)`,
+ `SELECT id, COUNT(*) FROM user GROUP BY id HAVING COUNT(*) > (SELECT COUNT(*) FROM user_extra WHERE user_extra.user_id = user.id) + 1`,
+ `SELECT id, name FROM user ORDER BY (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) * id`,
+ `SELECT id, name, (SELECT COUNT(*) FROM user_extra WHERE user.id = user_extra.user_id) + id AS extra_count_plus_id FROM user`,
+ `SELECT id, name FROM user WHERE id IN (SELECT user_id FROM user_extra WHERE extra_info = 'info1') OR id IN (SELECT user_id FROM user_extra WHERE extra_info = 'info2')`,
+ `SELECT id, name, (SELECT COUNT(*) FROM user_extra) AS total_extra_count, SUM(id) AS sum_ids FROM user GROUP BY id, name ORDER BY (SELECT COUNT(*) FROM user_extra)`,
+ // `SELECT id, name, (SELECT SUM(LENGTH(extra_info)) FROM user_extra) AS total_length_extra_info, AVG(id) AS avg_ids FROM user GROUP BY id, name HAVING (SELECT SUM(LENGTH(extra_info)) FROM user_extra) > 10`,
+ `SELECT id, name, (SELECT AVG(LENGTH(extra_info)) FROM user_extra) AS avg_length_extra_info, MAX(id) AS max_id FROM user WHERE id IN (SELECT user_id FROM user_extra) GROUP BY id, name`,
+ `SELECT id, name, (SELECT MAX(LENGTH(extra_info)) FROM user_extra) AS max_length_extra_info, MIN(id) AS min_id FROM user GROUP BY id, name ORDER BY (SELECT MAX(LENGTH(extra_info)) FROM user_extra)`,
+ `SELECT id, name, (SELECT MIN(LENGTH(extra_info)) FROM user_extra) AS min_length_extra_info, SUM(id) AS sum_ids FROM user GROUP BY id, name HAVING (SELECT MIN(LENGTH(extra_info)) FROM user_extra) < 5`,
+ `SELECT id, name, (SELECT COUNT(*) FROM user_extra) AS total_extra_count, AVG(id) AS avg_ids FROM user WHERE id > (SELECT COUNT(*) FROM user_extra) GROUP BY id, name`,
+ // `SELECT id, name, (SELECT SUM(LENGTH(extra_info)) FROM user_extra) AS total_length_extra_info, COUNT(id) AS count_ids FROM user GROUP BY id, name ORDER BY (SELECT SUM(LENGTH(extra_info)) FROM user_extra)`,
+ // `SELECT id, name, (SELECT COUNT(*) FROM user_extra) AS total_extra_count, (SELECT SUM(LENGTH(extra_info)) FROM user_extra) AS total_length_extra_info, (SELECT AVG(LENGTH(extra_info)) FROM user_extra) AS avg_length_extra_info, (SELECT MAX(LENGTH(extra_info)) FROM user_extra) AS max_length_extra_info, (SELECT MIN(LENGTH(extra_info)) FROM user_extra) AS min_length_extra_info, SUM(id) AS sum_ids FROM user GROUP BY id, name HAVING (SELECT AVG(LENGTH(extra_info)) FROM user_extra) > 2`,
+ `SELECT id, name, (SELECT COUNT(*) FROM user_extra) + id AS total_extra_count_plus_id, AVG(id) AS avg_ids FROM user WHERE id < (SELECT MAX(user_id) FROM user_extra) GROUP BY id, name`,
+ }
+
+ for idx, query := range queries {
+ mcmp.Run(fmt.Sprintf("%d %s", idx, query), func(mcmp *utils.MySQLCompare) {
+ mcmp.Exec(query)
+ })
+ }
+}
diff --git a/go/test/endtoend/vtgate/queries/subquery/vschema.json b/go/test/endtoend/vtgate/queries/subquery/vschema.json
index da4e589f20f..a98255db65e 100644
--- a/go/test/endtoend/vtgate/queries/subquery/vschema.json
+++ b/go/test/endtoend/vtgate/queries/subquery/vschema.json
@@ -22,6 +22,9 @@
"autocommit": "true"
},
"owner": "t2"
+ },
+ "xxhash": {
+ "type": "xxhash"
}
},
"tables": {
@@ -64,6 +67,34 @@
"name": "hash"
}
]
+ },
+ "user_extra": {
+ "name": "user_extra",
+ "column_vindexes": [
+ {
+ "columns": [
+ "user_id",
+ "extra_info"
+ ],
+ "type": "xxhash",
+ "name": "xxhash",
+ "vindex": null
+ }
+ ]
+ },
+ "user": {
+ "name": "user",
+ "column_vindexes": [
+ {
+ "columns": [
+ "id"
+ ],
+ "type": "xxhash",
+ "name": "xxhash",
+ "vindex": null
+ }
+ ]
}
+
}
}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
index 513aea94a86..c0d8c798273 100644
--- a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
+++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
@@ -48,7 +48,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
}
func TestTPCHQueries(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, keyspaceName, "region", `R_COMMENT`)
diff --git a/go/test/endtoend/vtgate/vitess_tester/tpcc/tpcc.test b/go/test/endtoend/vtgate/vitess_tester/tpcc/tpcc.test
new file mode 100644
index 00000000000..16f624aa1f6
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/tpcc/tpcc.test
@@ -0,0 +1,210 @@
+# The TPC-C benchmark queries, with sample data so the queries can be tested
+
+CREATE TABLE IF NOT EXISTS warehouse (
+ w_id INT NOT NULL,
+ w_name VARCHAR(10),
+ w_street_1 VARCHAR(20),
+ w_street_2 VARCHAR(20),
+ w_city VARCHAR(20),
+ w_state CHAR(2),
+ w_zip CHAR(9),
+ w_tax DECIMAL(4, 4),
+ w_ytd DECIMAL(12, 2),
+ PRIMARY KEY (w_id)
+);
+
+CREATE TABLE IF NOT EXISTS customer (
+ c_id INT NOT NULL,
+ c_d_id INT NOT NULL,
+ c_w_id INT NOT NULL,
+ c_first VARCHAR(16),
+ c_middle CHAR(2),
+ c_last VARCHAR(16),
+ c_street_1 VARCHAR(20),
+ c_street_2 VARCHAR(20),
+ c_city VARCHAR(20),
+ c_state CHAR(2),
+ c_zip CHAR(9),
+ c_phone CHAR(16),
+ c_since DATETIME,
+ c_credit CHAR(2),
+ c_credit_lim DECIMAL(12, 2),
+ c_discount DECIMAL(4,4),
+ c_balance DECIMAL(12,2),
+ c_ytd_payment DECIMAL(12,2),
+ c_payment_cnt INT,
+ c_delivery_cnt INT,
+ c_data VARCHAR(500),
+ PRIMARY KEY(c_w_id, c_d_id, c_id),
+ INDEX idx_customer (c_w_id, c_d_id, c_last, c_first)
+);
+
+CREATE TABLE IF NOT EXISTS district (
+ d_id INT NOT NULL,
+ d_w_id INT NOT NULL,
+ d_name VARCHAR(10),
+ d_street_1 VARCHAR(20),
+ d_street_2 VARCHAR(20),
+ d_city VARCHAR(20),
+ d_state CHAR(2),
+ d_zip CHAR(9),
+ d_tax DECIMAL(4, 4),
+ d_ytd DECIMAL(12, 2),
+ d_next_o_id INT,
+ PRIMARY KEY (d_w_id, d_id)
+);
+
+CREATE TABLE IF NOT EXISTS history (
+ h_c_id INT NOT NULL,
+ h_c_d_id INT NOT NULL,
+ h_c_w_id INT NOT NULL,
+ h_d_id INT NOT NULL,
+ h_w_id INT NOT NULL,
+ h_date DATETIME,
+ h_amount DECIMAL(6, 2),
+ h_data VARCHAR(24),
+ INDEX idx_h_w_id (h_w_id),
+ INDEX idx_h_c_w_id (h_c_w_id)
+);
+
+CREATE TABLE IF NOT EXISTS new_orders (
+ no_o_id INT NOT NULL,
+ no_d_id INT NOT NULL,
+ no_w_id INT NOT NULL,
+ PRIMARY KEY(no_w_id, no_d_id, no_o_id)
+);
+
+CREATE TABLE IF NOT EXISTS orders (
+ o_id INT NOT NULL,
+ o_d_id INT NOT NULL,
+ o_w_id INT NOT NULL,
+ o_c_id INT,
+ o_entry_d DATETIME,
+ o_carrier_id INT,
+ o_ol_cnt INT,
+ o_all_local INT,
+ PRIMARY KEY(o_w_id, o_d_id, o_id),
+ INDEX idx_order (o_w_id, o_d_id, o_c_id, o_id)
+);
+
+CREATE TABLE IF NOT EXISTS order_line (
+ ol_o_id INT NOT NULL,
+ ol_d_id INT NOT NULL,
+ ol_w_id INT NOT NULL,
+ ol_number INT NOT NULL,
+ ol_i_id INT NOT NULL,
+ ol_supply_w_id INT,
+ ol_delivery_d DATETIME,
+ ol_quantity INT,
+ ol_amount DECIMAL(6, 2),
+ ol_dist_info CHAR(24),
+ PRIMARY KEY(ol_w_id, ol_d_id, ol_o_id, ol_number)
+);
+
+CREATE TABLE IF NOT EXISTS stock (
+ s_i_id INT NOT NULL,
+ s_w_id INT NOT NULL,
+ s_quantity INT,
+ s_dist_01 CHAR(24),
+ s_dist_02 CHAR(24),
+ s_dist_03 CHAR(24),
+ s_dist_04 CHAR(24),
+ s_dist_05 CHAR(24),
+ s_dist_06 CHAR(24),
+ s_dist_07 CHAR(24),
+ s_dist_08 CHAR(24),
+ s_dist_09 CHAR(24),
+ s_dist_10 CHAR(24),
+ s_ytd INT,
+ s_order_cnt INT,
+ s_remote_cnt INT,
+ s_data VARCHAR(50),
+ PRIMARY KEY(s_w_id, s_i_id)
+);
+
+CREATE TABLE IF NOT EXISTS item (
+ i_id INT NOT NULL,
+ i_im_id INT,
+ i_name VARCHAR(24),
+ i_price DECIMAL(5, 2),
+ i_data VARCHAR(50),
+ PRIMARY KEY(i_id)
+);
+
+INSERT INTO warehouse (w_id, w_name, w_street_1, w_street_2, w_city, w_state, w_zip, w_tax, w_ytd) VALUES
+(1, 'Main', '123 Elm St', 'Suite 100', 'Anytown', 'CA', '12345', 0.0750, 100000.00),
+(2, 'Side', '123 Storgatan', 'Suite 666', 'Uptown', 'SE', '87654', 0.0150, 200000.00);
+
+INSERT INTO customer (c_id, c_d_id, c_w_id, c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_since, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_payment_cnt, c_delivery_cnt, c_data) VALUES
+(10, 15, 1, 'John', 'Q', 'Public', '456 Oak St', 'Apt 5', 'Othertown', 'NY', '54321', '555-1234-5678', '2023-01-01 12:00:00', 'Y', 50000.00, 0.0500, -100.00, 1500.00, 15, 2, 'Frequent shopper'),
+(1, 1, 5, 'Jane', 'R', 'last', '789 Pine St', 'Unit 7', 'Smalltown', 'TX', '98765', '555-8765-4321', '2023-02-02 14:30:00', 'N', 75000.00, 0.0250, 500.00, 250.00, 5, 1, 'Occasional shopper'),
+(2, 1, 5, 'Jake', 'S', 'last', '101 Birch St', 'Suite 21', 'Middletown', 'FL', '32145', '555-5678-1234', '2023-03-03 16:45:00', 'Y', 100000.00, 0.1000, 200.00, 300.00, 10, 3, 'Regular shopper'),
+(3, 5, 8, 'Alice', 'T', 'item_last', '102 Acacia Ave', 'Top Floor', 'Bigtown', 'CO', '12345', '555-9876-5432', '2023-04-04 18:00:00', 'N', 30000.00, 0.0750, 150.00, 100.00, 3, 1, 'Sporadic shopper'),
+(4, 5, 8, 'Bob', 'U', 'item_last', '103 Maple Dr', 'Room 6', 'Laketown', 'WA', '98765', '555-6543-2109', '2023-05-05 19:15:00', 'Y', 20000.00, 0.0500, 0.00, 50.00, 2, 0, 'New shopper'),
+(9, 1, 8965, 'Charlie', 'V', 'Quiet', '104 Cedar Ln', 'Basement', 'Cloudtown', 'VT', '54321', '555-3210-9876', '2023-06-06 20:30:00', 'N', 15000.00, 0.0200, 75.00, 25.00, 1, 0, 'Rare shopper'),
+(5, 68, 32, 'Dan', 'W', 'Anyone', '105 Spruce Rd', 'Floor 2', 'Hilltown', 'ME', '32145', '555-4321-0987', '2023-07-07 21:45:00', 'Y', 10000.00, 0.0150, 500.00, 75.00, 5, 2, 'Ad hoc shopper');
+
+INSERT INTO district (d_id, d_w_id, d_name, d_street_1, d_street_2, d_city, d_state, d_zip, d_tax, d_ytd, d_next_o_id) VALUES
+(95, 15, 'Central', '123 Central St', 'Unit 5', 'Centerville', 'CA', '95021', 0.0850, 20000.00, 10),
+(9, 896, 'Eastside', '789 East St', 'Bldg 2', 'Eastville', 'NY', '10021', 0.0750, 15000.00, 20),
+(6, 21, 'Westend', '456 West Rd', 'Suite 8', 'Westtown', 'TX', '77019', 0.0650, 50000.00, 30);
+
+INSERT INTO orders (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_carrier_id, o_ol_cnt, o_all_local) VALUES
+(10, 3, 9894, 159, '2024-04-30 12:00:00', 12, 5, 1),
+(9, 3, 9894, 159, '2024-04-29 12:00:00', 15, 3, 1),
+(8, 3, 9894, 159, '2024-04-28 12:00:00', null, 4, 1),
+(6, 1983, 894605, 204, '2024-04-27 12:00:00', 10, 2, 0),
+(2110, 1, 1, 105, '2024-04-15 10:00:00', 5, 3, 1),
+(3000, 1, 1, 105, '2024-04-16 10:05:00', 6, 2, 1),
+(4200, 1, 1, 105, '2024-04-17 10:10:00', 7, 1, 1);
+
+INSERT INTO order_line (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_delivery_d, ol_quantity, ol_amount, ol_dist_info) VALUES
+ (1, 5, 92, 1, 101, 92, '2024-05-01 12:00:00', 5, 150.00, 'xyzabcdefghijklmnopr'),
+ (680, 201, 87, 1, 102, 87, '2024-05-02 13:00:00', 10, 100.00, 'yzabcdefghijklmnopqr'),
+ (680, 201, 87, 2, 103, 87, '2024-05-02 13:05:00', 2, 50.00, 'zabcdefghijklmnopqrs'),
+ (45, 156, 1, 1, 104, 1, '2024-05-03 14:00:00', 20, 200.00, 'abcdejklmnopqrsvwxyx'),
+ (56, 156, 1, 2, 105, 1, '2024-05-04 15:00:00', 30, 250.00, 'bcdefghiqrstuvwxyza'),
+ (15, 1908, 12, 1, 106, 12, '2024-05-05 16:00:00', 3, 75.00, 'cdefghijklmnopqwxyzab');
+
+INSERT INTO stock (s_i_id, s_w_id, s_quantity, s_dist_01, s_dist_02, s_dist_03, s_dist_04, s_dist_05, s_dist_06, s_dist_07, s_dist_08, s_dist_09, s_dist_10, s_ytd, s_order_cnt, s_remote_cnt, s_data) VALUES
+(101, 92, 50, 'distdata1', 'distdata2', 'distdata3', 'distdata4', 'distdata5', 'distdata6', 'distdata7', 'distdata8', 'distdata9', 'distdata10', 1000, 100, 10, 'Example data string'),
+(102, 87, 30, 'distdata1', 'distdata2', 'distdata3', 'distdata4', 'distdata5', 'distdata6', 'distdata7', 'distdata8', 'distdata9', 'distdata10', 500, 50, 5, 'Another example string'),
+(106, 12, 5, 'distdata1', 'distdata2', 'distdata3', 'distdata4', 'distdata5', 'distdata6', 'distdata7', 'distdata8', 'distdata9', 'distdata10', 300, 30, 3, 'Yet another string'),
+(8, 1, 900, 'distdata1', 'distdata2', 'distdata3', 'distdata4', 'distdata5', 'distdata6', 'distdata7', 'distdata8', 'distdata9', 'distdata10', 800, 80, 8, 'Low stock string'),
+(2198, 89, 100, 'distdata1', '', '', '', '', '', '', '', '', '', 150, 15, 1, 'Critical stock data');
+
+INSERT INTO new_orders (no_o_id, no_d_id, no_w_id) VALUES
+(10, 689, 15),
+(11, 689, 15),
+(12, 689, 15);
+
+INSERT INTO item (i_id, i_im_id, i_name, i_price, i_data) VALUES
+(9654, 123, 'Gadget', 199.99, 'High-quality electronic gadget'),
+(9655, 124, 'Widget', 29.99, 'Durable plastic widget');
+
+# Here follow the SELECT queries we are testing.
+# The TPC-C benchmark also uses INSERT, UPDATE and DELETE queries, but we are not testing those here.
+-- wait_authoritative customer
+-- wait_authoritative warehouse
+SELECT c_discount, c_last, c_credit, w_tax FROM customer AS c JOIN warehouse AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10;
+SELECT count(c_id) namecnt FROM customer WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last';
+SELECT c_id FROM customer WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first;
+SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9;
+SELECT c_data FROM customer WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5;
+SELECT count(c_id) namecnt FROM customer WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last';
+SELECT c_balance, c_first, c_middle, c_id FROM customer WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first;
+SELECT c_balance, c_first, c_middle, c_last FROM customer WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1;
+SELECT d_next_o_id, d_tax FROM district WHERE d_w_id = 15 AND d_id = 95;
+SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district WHERE d_w_id = 896 AND d_id = 9;
+SELECT d_next_o_id FROM district WHERE d_id = 6 AND d_w_id= 21;
+SELECT o_id, o_carrier_id, o_entry_d FROM orders WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC;
+SELECT o_c_id FROM orders WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605;
+SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1;
+SELECT SUM(ol_amount) sm FROM order_line WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87;
+SELECT DISTINCT ol_i_id FROM order_line WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56;
+SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock AS s JOIN order_line AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10;
+SELECT count(*) FROM stock WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000;
+SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock WHERE s_i_id = 2198 AND s_w_id = 89;
+SELECT no_o_id FROM new_orders WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1;
+SELECT i_price, i_name, i_data FROM item WHERE i_id = 9654;
+SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse WHERE w_id = 998;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vitess_tester/tpch/tpch.test b/go/test/endtoend/vtgate/vitess_tester/tpch/tpch.test
new file mode 100644
index 00000000000..2452b2dbcd6
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/tpch/tpch.test
@@ -0,0 +1,780 @@
+# http://www.tpc.org/tpc_documents_current_versions/pdf/tpc-h_v2.17.1.pdf
+
+CREATE TABLE IF NOT EXISTS nation ( N_NATIONKEY INTEGER NOT NULL,
+ N_NAME CHAR(25) NOT NULL,
+ N_REGIONKEY INTEGER NOT NULL,
+ N_COMMENT VARCHAR(152),
+ PRIMARY KEY (N_NATIONKEY));
+
+CREATE TABLE IF NOT EXISTS region ( R_REGIONKEY INTEGER NOT NULL,
+ R_NAME CHAR(25) NOT NULL,
+ R_COMMENT VARCHAR(152),
+ PRIMARY KEY (R_REGIONKEY));
+
+CREATE TABLE IF NOT EXISTS part ( P_PARTKEY INTEGER NOT NULL,
+ P_NAME VARCHAR(55) NOT NULL,
+ P_MFGR CHAR(25) NOT NULL,
+ P_BRAND CHAR(10) NOT NULL,
+ P_TYPE VARCHAR(25) NOT NULL,
+ P_SIZE INTEGER NOT NULL,
+ P_CONTAINER CHAR(10) NOT NULL,
+ P_RETAILPRICE DECIMAL(15,2) NOT NULL,
+ P_COMMENT VARCHAR(23) NOT NULL,
+ PRIMARY KEY (P_PARTKEY));
+
+CREATE TABLE IF NOT EXISTS supplier ( S_SUPPKEY INTEGER NOT NULL,
+ S_NAME CHAR(25) NOT NULL,
+ S_ADDRESS VARCHAR(40) NOT NULL,
+ S_NATIONKEY INTEGER NOT NULL,
+ S_PHONE CHAR(15) NOT NULL,
+ S_ACCTBAL DECIMAL(15,2) NOT NULL,
+ S_COMMENT VARCHAR(101) NOT NULL,
+ PRIMARY KEY (S_SUPPKEY));
+
+CREATE TABLE IF NOT EXISTS partsupp ( PS_PARTKEY INTEGER NOT NULL,
+ PS_SUPPKEY INTEGER NOT NULL,
+ PS_AVAILQTY INTEGER NOT NULL,
+ PS_SUPPLYCOST DECIMAL(15,2) NOT NULL,
+ PS_COMMENT VARCHAR(199) NOT NULL,
+ PRIMARY KEY (PS_PARTKEY,PS_SUPPKEY));
+
+CREATE TABLE IF NOT EXISTS customer ( C_CUSTKEY INTEGER NOT NULL,
+ C_NAME VARCHAR(25) NOT NULL,
+ C_ADDRESS VARCHAR(40) NOT NULL,
+ C_NATIONKEY INTEGER NOT NULL,
+ C_PHONE CHAR(15) NOT NULL,
+ C_ACCTBAL DECIMAL(15,2) NOT NULL,
+ C_MKTSEGMENT CHAR(10) NOT NULL,
+ C_COMMENT VARCHAR(117) NOT NULL,
+ PRIMARY KEY (C_CUSTKEY));
+
+CREATE TABLE IF NOT EXISTS orders ( O_ORDERKEY INTEGER NOT NULL,
+ O_CUSTKEY INTEGER NOT NULL,
+ O_ORDERSTATUS CHAR(1) NOT NULL,
+ O_TOTALPRICE DECIMAL(15,2) NOT NULL,
+ O_ORDERDATE DATE NOT NULL,
+ O_ORDERPRIORITY CHAR(15) NOT NULL,
+ O_CLERK CHAR(15) NOT NULL,
+ O_SHIPPRIORITY INTEGER NOT NULL,
+ O_COMMENT VARCHAR(79) NOT NULL,
+ PRIMARY KEY (O_ORDERKEY));
+
+CREATE TABLE IF NOT EXISTS lineitem ( L_ORDERKEY INTEGER NOT NULL,
+ L_PARTKEY INTEGER NOT NULL,
+ L_SUPPKEY INTEGER NOT NULL,
+ L_LINENUMBER INTEGER NOT NULL,
+ L_QUANTITY DECIMAL(15,2) NOT NULL,
+ L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL,
+ L_DISCOUNT DECIMAL(15,2) NOT NULL,
+ L_TAX DECIMAL(15,2) NOT NULL,
+ L_RETURNFLAG CHAR(1) NOT NULL,
+ L_LINESTATUS CHAR(1) NOT NULL,
+ L_SHIPDATE DATE NOT NULL,
+ L_COMMITDATE DATE NOT NULL,
+ L_RECEIPTDATE DATE NOT NULL,
+ L_SHIPINSTRUCT CHAR(25) NOT NULL,
+ L_SHIPMODE CHAR(10) NOT NULL,
+ L_COMMENT VARCHAR(44) NOT NULL,
+ PRIMARY KEY (L_ORDERKEY,L_LINENUMBER));
+
+INSERT INTO region (R_REGIONKEY, R_NAME, R_COMMENT) VALUES
+ (1, 'ASIA', 'Eastern Asia'),
+ (2, 'MIDDLE EAST', 'Rich cultural heritage');
+
+INSERT INTO nation (N_NATIONKEY, N_NAME, N_REGIONKEY, N_COMMENT) VALUES
+ (1, 'China', 1, 'Large population'),
+ (2, 'India', 1, 'Large variety of cultures'),
+ (3, 'Nation A', 2, 'Historic sites'),
+ (4, 'Nation B', 2, 'Beautiful landscapes');
+
+INSERT INTO supplier (S_SUPPKEY, S_NAME, S_ADDRESS, S_NATIONKEY, S_PHONE, S_ACCTBAL, S_COMMENT) VALUES
+ (1, 'Supplier A', '123 Square', 1, '86-123-4567', 5000.00, 'High quality steel'),
+ (2, 'Supplier B', '456 Ganges St', 2, '91-789-4561', 5500.00, 'Efficient production'),
+ (3, 'Supplier 1', 'Supplier Address 1', 3, '91-789-4562', 3000.00, 'Supplier Comment 1'),
+ (4, 'Supplier 2', 'Supplier Address 2', 2, '91-789-4563', 4000.00, 'Supplier Comment 2');
+
+INSERT INTO part (P_PARTKEY, P_NAME, P_MFGR, P_BRAND, P_TYPE, P_SIZE, P_CONTAINER, P_RETAILPRICE, P_COMMENT) VALUES
+ (100, 'Part 100', 'MFGR A', 'Brand X', 'BOLT STEEL', 30, 'SM BOX', 45.00, 'High strength'),
+ (101, 'Part 101', 'MFGR B', 'Brand Y', 'NUT STEEL', 30, 'LG BOX', 30.00, 'Rust resistant');
+
+INSERT INTO partsupp (PS_PARTKEY, PS_SUPPKEY, PS_AVAILQTY, PS_SUPPLYCOST, PS_COMMENT) VALUES
+ (100, 1, 500, 10.00, 'Deliveries on time'),
+ (101, 2, 300, 9.00, 'Back orders possible'),
+ (100, 2, 600, 8.50, 'Bulk discounts available');
+
+INSERT INTO customer (C_CUSTKEY, C_NAME, C_ADDRESS, C_NATIONKEY, C_PHONE, C_ACCTBAL, C_MKTSEGMENT, C_COMMENT) VALUES
+ (1, 'Customer A', '1234 Drive Lane', 1, '123-456-7890', 1000.00, 'AUTOMOBILE', 'Frequent orders'),
+ (2, 'Customer B', '5678 Park Ave', 2, '234-567-8901', 2000.00, 'AUTOMOBILE', 'Large orders'),
+ (3, 'Customer 1', 'Address 1', 1, 'Phone 1', 1000.00, 'Segment 1', 'Comment 1'),
+ (4, 'Customer 2', 'Address 2', 2, 'Phone 2', 2000.00, 'Segment 2', 'Comment 2');
+
+INSERT INTO orders (O_ORDERKEY, O_CUSTKEY, O_ORDERSTATUS, O_TOTALPRICE, O_ORDERDATE, O_ORDERPRIORITY, O_CLERK, O_SHIPPRIORITY, O_COMMENT) VALUES
+ (100, 1, 'O', 15000.00, '1995-03-10', '1-URGENT', 'Clerk#0001', 1, 'N/A'),
+ (101, 2, 'O', 25000.00, '1995-03-05', '2-HIGH', 'Clerk#0002', 2, 'N/A'),
+ (1, 3, 'O', 10000.00, '1994-01-10', 'Priority 1', 'Clerk 1', 1, 'Order Comment 1'),
+ (2, 4, 'O', 20000.00, '1994-06-15', 'Priority 2', 'Clerk 2', 1, 'Order Comment 2');
+
+INSERT INTO lineitem (L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER, L_QUANTITY, L_EXTENDEDPRICE, L_DISCOUNT, L_TAX, L_RETURNFLAG, L_LINESTATUS, L_SHIPDATE, L_COMMITDATE, L_RECEIPTDATE, L_SHIPINSTRUCT, L_SHIPMODE, L_COMMENT) VALUES
+ (100, 200, 300, 1, 10, 5000.00, 0.05, 0.10, 'N', 'O', '1995-03-15', '1995-03-14', '1995-03-16', 'DELIVER IN PERSON', 'TRUCK', 'Urgent delivery'),
+ (100, 201, 301, 2, 20, 10000.00, 0.10, 0.10, 'R', 'F', '1995-03-17', '1995-03-15', '1995-03-18', 'NONE', 'MAIL', 'Handle with care'),
+ (101, 202, 302, 1, 30, 15000.00, 0.00, 0.10, 'A', 'F', '1995-03-20', '1995-03-18', '1995-03-21', 'TAKE BACK RETURN', 'SHIP', 'Standard delivery'),
+ (101, 203, 303, 2, 40, 10000.00, 0.20, 0.10, 'N', 'O', '1995-03-22', '1995-03-20', '1995-03-23', 'DELIVER IN PERSON', 'RAIL', 'Expedite'),
+ (1, 101, 1, 1, 5, 5000.00, 0.1, 0.05, 'N', 'O', '1994-01-12', '1994-01-11', '1994-01-13', 'Deliver in person','TRUCK', 'Lineitem Comment 1'),
+ (2, 102, 2, 1, 3, 15000.00, 0.2, 0.05, 'R', 'F', '1994-06-17', '1994-06-15', '1994-06-18', 'Leave at front door','AIR', 'Lineitem Comment 2'),
+ (11, 100, 2, 1, 30, 10000.00, 0.05, 0.07, 'A', 'F', '1998-07-21', '1998-07-22', '1998-07-23', 'DELIVER IN PERSON', 'TRUCK', 'N/A'),
+ (12, 101, 3, 1, 50, 15000.00, 0.10, 0.08, 'N', 'O', '1998-08-10', '1998-08-11', '1998-08-12', 'NONE', 'AIR', 'N/A'),
+ (13, 102, 4, 1, 70, 21000.00, 0.02, 0.04, 'R', 'F', '1998-06-30', '1998-07-01', '1998-07-02', 'TAKE BACK RETURN', 'MAIL', 'N/A'),
+ (14, 103, 5, 1, 90, 30000.00, 0.15, 0.10, 'A', 'O', '1998-05-15', '1998-05-16', '1998-05-17', 'DELIVER IN PERSON', 'RAIL', 'N/A'),
+ (15, 104, 2, 1, 45, 45000.00, 0.20, 0.15, 'N', 'F', '1998-07-15', '1998-07-16', '1998-07-17', 'NONE', 'SHIP', 'N/A');
+
+# Query 1
+select
+ l_returnflag,
+ l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+from
+ lineitem
+where
+ l_shipdate <= date_sub('1998-12-01', interval 108 day)
+group by
+ l_returnflag,
+ l_linestatus
+order by
+ l_returnflag,
+ l_linestatus;
+
+# Query 2
+-- skip
+select
+ s_acctbal,
+ s_name,
+ n_name,
+ p_partkey,
+ p_mfgr,
+ s_address,
+ s_phone,
+ s_comment
+from
+ part,
+ supplier,
+ partsupp,
+ nation,
+ region
+where
+ p_partkey = ps_partkey
+ and s_suppkey = ps_suppkey
+ and p_size = 30
+ and p_type like '%STEEL'
+ and s_nationkey = n_nationkey
+ and n_regionkey = r_regionkey
+ and r_name = 'ASIA'
+ and ps_supplycost = (
+ select
+ min(ps_supplycost)
+ from
+ partsupp,
+ supplier,
+ nation,
+ region
+ where
+ p_partkey = ps_partkey
+ and s_suppkey = ps_suppkey
+ and s_nationkey = n_nationkey
+ and n_regionkey = r_regionkey
+ and r_name = 'ASIA'
+ )
+order by
+ s_acctbal desc,
+ n_name,
+ s_name,
+ p_partkey
+limit 100;
+
+-- wait_authoritative customer
+-- wait_authoritative orders
+-- wait_authoritative lineitem
+# Q3 Shipping Priority Query
+select
+ l_orderkey,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ o_orderdate,
+ o_shippriority
+from
+ customer,
+ orders,
+ lineitem
+where
+ c_mktsegment = 'AUTOMOBILE'
+ and c_custkey = o_custkey
+ and l_orderkey = o_orderkey
+ and o_orderdate < '1995-03-13'
+ and l_shipdate > '1995-03-13'
+group by
+ l_orderkey,
+ o_orderdate,
+ o_shippriority
+order by
+ revenue desc,
+ o_orderdate
+limit 10;
+
+# Q4 Order Priority Checking Query
+select
+ o_orderpriority,
+ count(*) as order_count
+from
+ orders
+where
+ o_orderdate >= '1995-01-01'
+ and o_orderdate < date_add('1995-01-01', interval '3' month)
+ and exists (
+ select
+ *
+ from
+ lineitem
+ where
+ l_orderkey = o_orderkey
+ and l_commitdate < l_receiptdate
+ )
+group by
+ o_orderpriority
+order by
+ o_orderpriority;
+
+# Q5 Local Supplier Volume Query
+select
+ n_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue
+from
+ customer,
+ orders,
+ lineitem,
+ supplier,
+ nation,
+ region
+where
+ c_custkey = o_custkey
+ and l_orderkey = o_orderkey
+ and l_suppkey = s_suppkey
+ and c_nationkey = s_nationkey
+ and s_nationkey = n_nationkey
+ and n_regionkey = r_regionkey
+ and r_name = 'MIDDLE EAST'
+ and o_orderdate >= '1994-01-01'
+ and o_orderdate < date_add('1994-01-01', interval '1' year)
+group by
+ n_name
+order by
+ revenue desc;
+
+# Q6 Forecasting Revenue Change Query
+select
+ sum(l_extendedprice * l_discount) as revenue
+from
+ lineitem
+where
+ l_shipdate >= '1994-01-01'
+ and l_shipdate < date_add('1994-01-01', interval '1' year)
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
+
+# Q7 Volume Shipping Query
+-- skip
+select
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) as revenue
+from
+ (
+ select
+ n1.n_name as supp_nation,
+ n2.n_name as cust_nation,
+ extract(year from l_shipdate) as l_year,
+ l_extendedprice * (1 - l_discount) as volume
+ from
+ supplier,
+ lineitem,
+ orders,
+ customer,
+ nation n1,
+ nation n2
+ where
+ s_suppkey = l_suppkey
+ and o_orderkey = l_orderkey
+ and c_custkey = o_custkey
+ and s_nationkey = n1.n_nationkey
+ and c_nationkey = n2.n_nationkey
+ and (
+ (n1.n_name = 'JAPAN' and n2.n_name = 'INDIA')
+ or (n1.n_name = 'INDIA' and n2.n_name = 'JAPAN')
+ )
+ and l_shipdate between '1995-01-01' and '1996-12-31'
+ ) as shipping
+group by
+ supp_nation,
+ cust_nation,
+ l_year
+order by
+ supp_nation,
+ cust_nation,
+ l_year;
+
+# Q8 National Market Share Query
+-- skip
+select
+ o_year,
+ sum(case
+ when nation = 'INDIA' then volume
+ else 0
+ end) / sum(volume) as mkt_share
+from
+ (
+ select
+ extract(year from o_orderdate) as o_year,
+ l_extendedprice * (1 - l_discount) as volume,
+ n2.n_name as nation
+ from
+ part,
+ supplier,
+ lineitem,
+ orders,
+ customer,
+ nation n1,
+ nation n2,
+ region
+ where
+ p_partkey = l_partkey
+ and s_suppkey = l_suppkey
+ and l_orderkey = o_orderkey
+ and o_custkey = c_custkey
+ and c_nationkey = n1.n_nationkey
+ and n1.n_regionkey = r_regionkey
+ and r_name = 'ASIA'
+ and s_nationkey = n2.n_nationkey
+ and o_orderdate between '1995-01-01' and '1996-12-31'
+ and p_type = 'SMALL PLATED COPPER'
+ ) as all_nations
+group by
+ o_year
+order by
+ o_year;
+
+# Q9 Product Type Profit Measure Query
+-- skip
+select
+ nation,
+ o_year,
+ sum(amount) as sum_profit
+from
+ (
+ select
+ n_name as nation,
+ extract(year from o_orderdate) as o_year,
+ l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+ from
+ part,
+ supplier,
+ lineitem,
+ partsupp,
+ orders,
+ nation
+ where
+ s_suppkey = l_suppkey
+ and ps_suppkey = l_suppkey
+ and ps_partkey = l_partkey
+ and p_partkey = l_partkey
+ and o_orderkey = l_orderkey
+ and s_nationkey = n_nationkey
+ and p_name like '%dim%'
+ ) as profit
+group by
+ nation,
+ o_year
+order by
+ nation,
+ o_year desc;
+
+# Q10 Returned Item Reporting Query
+select
+ c_custkey,
+ c_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ c_acctbal,
+ n_name,
+ c_address,
+ c_phone,
+ c_comment
+from
+ customer,
+ orders,
+ lineitem,
+ nation
+where
+ c_custkey = o_custkey
+ and l_orderkey = o_orderkey
+ and o_orderdate >= '1993-08-01'
+ and o_orderdate < date_add('1993-08-01', interval '3' month)
+ and l_returnflag = 'R'
+ and c_nationkey = n_nationkey
+group by
+ c_custkey,
+ c_name,
+ c_acctbal,
+ c_phone,
+ n_name,
+ c_address,
+ c_comment
+order by
+ revenue desc
+limit 20;
+
+# Q11 Important Stock Identification Query
+select
+ ps_partkey,
+ sum(ps_supplycost * ps_availqty) as value
+from
+ partsupp,
+ supplier,
+ nation
+where
+ ps_suppkey = s_suppkey
+ and s_nationkey = n_nationkey
+ and n_name = 'MOZAMBIQUE'
+group by
+ ps_partkey having
+ sum(ps_supplycost * ps_availqty) > (
+ select
+ sum(ps_supplycost * ps_availqty) * 0.0001000000
+ from
+ partsupp,
+ supplier,
+ nation
+ where
+ ps_suppkey = s_suppkey
+ and s_nationkey = n_nationkey
+ and n_name = 'MOZAMBIQUE'
+ )
+order by
+ value desc;
+
+# Q12 Shipping Modes and Order Priority Query
+select
+ l_shipmode,
+ sum(case
+ when o_orderpriority = '1-URGENT'
+ or o_orderpriority = '2-HIGH'
+ then 1
+ else 0
+ end) as high_line_count,
+ sum(case
+ when o_orderpriority <> '1-URGENT'
+ and o_orderpriority <> '2-HIGH'
+ then 1
+ else 0
+ end) as low_line_count
+from
+ orders,
+ lineitem
+where
+ o_orderkey = l_orderkey
+ and l_shipmode in ('RAIL', 'FOB')
+ and l_commitdate < l_receiptdate
+ and l_shipdate < l_commitdate
+ and l_receiptdate >= '1997-01-01'
+ and l_receiptdate < date_add('1997-01-01', interval '1' year)
+group by
+ l_shipmode
+order by
+ l_shipmode;
+
+# Q13 Customer Distribution Query
+select
+ c_count,
+ count(*) as custdist
+from
+ (
+ select
+ c_custkey,
+ count(o_orderkey) as c_count
+ from
+ customer left outer join orders on
+ c_custkey = o_custkey
+ and o_comment not like '%pending%deposits%'
+ group by
+ c_custkey
+ ) c_orders
+group by
+ c_count
+order by
+ custdist desc,
+ c_count desc;
+
+# Q14 Promotion Effect Query
+select
+ 100.00 * sum(case
+ when p_type like 'PROMO%'
+ then l_extendedprice * (1 - l_discount)
+ else 0
+ end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+from
+ lineitem,
+ part
+where
+ l_partkey = p_partkey
+ and l_shipdate >= '1996-12-01'
+ and l_shipdate < date_add('1996-12-01', interval '1' month);
+
+# Q16 Parts/Supplier Relationship Query
+select
+ p_brand,
+ p_type,
+ p_size,
+ count(distinct ps_suppkey) as supplier_cnt
+from
+ partsupp,
+ part
+where
+ p_partkey = ps_partkey
+ and p_brand <> 'Brand#34'
+ and p_type not like 'LARGE BRUSHED%'
+ and p_size in (48, 19, 12, 4, 41, 7, 21, 39)
+ and ps_suppkey not in (
+ select
+ s_suppkey
+ from
+ supplier
+ where
+ s_comment like '%Customer%Complaints%'
+ )
+group by
+ p_brand,
+ p_type,
+ p_size
+order by
+ supplier_cnt desc,
+ p_brand,
+ p_type,
+ p_size;
+
+# Q17 Small-Quantity-Order Revenue Query
+--skip correlated subquery is only supported for EXISTS
+select
+ sum(l_extendedprice) / 7.0 as avg_yearly
+from
+ lineitem,
+ part
+where
+ p_partkey = l_partkey
+ and p_brand = 'Brand#44'
+ and p_container = 'WRAP PKG'
+ and l_quantity < (
+ select
+ 0.2 * avg(l_quantity)
+ from
+ lineitem
+ where
+ l_partkey = p_partkey
+ );
+
+# Q18 Large Volume Customer Query
+select
+ c_name,
+ c_custkey,
+ o_orderkey,
+ o_orderdate,
+ o_totalprice,
+ sum(l_quantity)
+from
+ customer,
+ orders,
+ lineitem
+where
+ o_orderkey in (
+ select
+ l_orderkey
+ from
+ lineitem
+ group by
+ l_orderkey having
+ sum(l_quantity) > 314
+ )
+ and c_custkey = o_custkey
+ and o_orderkey = l_orderkey
+group by
+ c_name,
+ c_custkey,
+ o_orderkey,
+ o_orderdate,
+ o_totalprice
+order by
+ o_totalprice desc,
+ o_orderdate
+limit 100;
+
+# Q19 Discounted Revenue Query
+select
+ sum(l_extendedprice* (1 - l_discount)) as revenue
+from
+ lineitem,
+ part
+where
+ (
+ p_partkey = l_partkey
+ and p_brand = 'Brand#52'
+ and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
+ and l_quantity >= 4 and l_quantity <= 4 + 10
+ and p_size between 1 and 5
+ and l_shipmode in ('AIR', 'AIR REG')
+ and l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ or
+ (
+ p_partkey = l_partkey
+ and p_brand = 'Brand#11'
+ and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
+ and l_quantity >= 18 and l_quantity <= 18 + 10
+ and p_size between 1 and 10
+ and l_shipmode in ('AIR', 'AIR REG')
+ and l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ or
+ (
+ p_partkey = l_partkey
+ and p_brand = 'Brand#51'
+ and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
+ and l_quantity >= 29 and l_quantity <= 29 + 10
+ and p_size between 1 and 15
+ and l_shipmode in ('AIR', 'AIR REG')
+ and l_shipinstruct = 'DELIVER IN PERSON'
+ );
+
+# Q20 Potential Part Promotion Query
+--skip correlated subquery is only supported for EXISTS
+select
+ s_name,
+ s_address
+from
+ supplier,
+ nation
+where
+ s_suppkey in (
+ select
+ ps_suppkey
+ from
+ partsupp
+ where
+ ps_partkey in (
+ select
+ p_partkey
+ from
+ part
+ where
+ p_name like 'green%'
+ )
+ and ps_availqty > (
+ select
+ 0.5 * sum(l_quantity)
+ from
+ lineitem
+ where
+ l_partkey = ps_partkey
+ and l_suppkey = ps_suppkey
+ and l_shipdate >= '1993-01-01'
+ and l_shipdate < date_add('1993-01-01', interval '1' year)
+ )
+ )
+ and s_nationkey = n_nationkey
+ and n_name = 'ALGERIA'
+order by
+ s_name;
+
+
+# Q21 Suppliers Who Kept Orders Waiting Query
+select
+ s_name,
+ count(*) as numwait
+from
+ supplier,
+ lineitem l1,
+ orders,
+ nation
+where
+ s_suppkey = l1.l_suppkey
+ and o_orderkey = l1.l_orderkey
+ and o_orderstatus = 'F'
+ and l1.l_receiptdate > l1.l_commitdate
+ and exists (
+ select
+ *
+ from
+ lineitem l2
+ where
+ l2.l_orderkey = l1.l_orderkey
+ and l2.l_suppkey <> l1.l_suppkey
+ )
+ and not exists (
+ select
+ *
+ from
+ lineitem l3
+ where
+ l3.l_orderkey = l1.l_orderkey
+ and l3.l_suppkey <> l1.l_suppkey
+ and l3.l_receiptdate > l3.l_commitdate
+ )
+ and s_nationkey = n_nationkey
+ and n_name = 'EGYPT'
+group by
+ s_name
+order by
+ numwait desc,
+ s_name
+limit 100;
+
+# Q22 Global Sales Opportunity Query
+-- skip correlated subquery is only supported for EXISTS
+select
+ cntrycode,
+ count(*) as numcust,
+ sum(c_acctbal) as totacctbal
+from
+ (
+ select
+ substring(c_phone from 1 for 2) as cntrycode,
+ c_acctbal
+ from
+ customer
+ where
+ substring(c_phone from 1 for 2) in
+ ('20', '40', '22', '30', '39', '42', '21')
+ and c_acctbal > (
+ select
+ avg(c_acctbal)
+ from
+ customer
+ where
+ c_acctbal > 0.00
+ and substring(c_phone from 1 for 2) in
+ ('20', '40', '22', '30', '39', '42', '21')
+ )
+ and not exists (
+ select
+ *
+ from
+ orders
+ where
+ o_custkey = c_custkey
+ )
+ ) as custsale
+group by
+ cntrycode
+order by
+ cntrycode;
\ No newline at end of file
diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go
index b820ea8a276..61c606c65ea 100644
--- a/go/vt/servenv/version.go
+++ b/go/vt/servenv/version.go
@@ -19,4 +19,4 @@ package servenv
// DO NOT EDIT
// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES BY THE VITESS-RELEASER
-const versionName = "19.0.4"
+const versionName = "19.0.5"
diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go
index ca7aae0f385..1dfc1050e0a 100644
--- a/go/vt/sqlparser/ast.go
+++ b/go/vt/sqlparser/ast.go
@@ -2864,6 +2864,8 @@ type (
Expr
GetArg() Expr
GetArgs() Exprs
+ SetArg(expr Expr)
+ SetArgs(exprs Exprs) error
// AggrName returns the lower case string representing this aggregation function
AggrName() string
}
@@ -3375,6 +3377,51 @@ func (varS *VarSamp) GetArgs() Exprs { return Exprs{varS.Arg} }
func (variance *Variance) GetArgs() Exprs { return Exprs{variance.Arg} }
func (av *AnyValue) GetArgs() Exprs { return Exprs{av.Arg} }
+func (min *Min) SetArg(expr Expr) { min.Arg = expr }
+func (sum *Sum) SetArg(expr Expr) { sum.Arg = expr }
+func (max *Max) SetArg(expr Expr) { max.Arg = expr }
+func (avg *Avg) SetArg(expr Expr) { avg.Arg = expr }
+func (*CountStar) SetArg(expr Expr) {}
+func (count *Count) SetArg(expr Expr) { count.Args = Exprs{expr} }
+func (grpConcat *GroupConcatExpr) SetArg(expr Expr) { grpConcat.Exprs = Exprs{expr} }
+func (bAnd *BitAnd) SetArg(expr Expr) { bAnd.Arg = expr }
+func (bOr *BitOr) SetArg(expr Expr) { bOr.Arg = expr }
+func (bXor *BitXor) SetArg(expr Expr) { bXor.Arg = expr }
+func (std *Std) SetArg(expr Expr) { std.Arg = expr }
+func (stdD *StdDev) SetArg(expr Expr) { stdD.Arg = expr }
+func (stdP *StdPop) SetArg(expr Expr) { stdP.Arg = expr }
+func (stdS *StdSamp) SetArg(expr Expr) { stdS.Arg = expr }
+func (varP *VarPop) SetArg(expr Expr) { varP.Arg = expr }
+func (varS *VarSamp) SetArg(expr Expr) { varS.Arg = expr }
+func (variance *Variance) SetArg(expr Expr) { variance.Arg = expr }
+func (av *AnyValue) SetArg(expr Expr) { av.Arg = expr }
+
+func (min *Min) SetArgs(exprs Exprs) error { return setFuncArgs(min, exprs, "MIN") }
+func (sum *Sum) SetArgs(exprs Exprs) error { return setFuncArgs(sum, exprs, "SUM") }
+func (max *Max) SetArgs(exprs Exprs) error { return setFuncArgs(max, exprs, "MAX") }
+func (avg *Avg) SetArgs(exprs Exprs) error { return setFuncArgs(avg, exprs, "AVG") }
+func (*CountStar) SetArgs(Exprs) error { return nil }
+func (bAnd *BitAnd) SetArgs(exprs Exprs) error { return setFuncArgs(bAnd, exprs, "BIT_AND") }
+func (bOr *BitOr) SetArgs(exprs Exprs) error { return setFuncArgs(bOr, exprs, "BIT_OR") }
+func (bXor *BitXor) SetArgs(exprs Exprs) error { return setFuncArgs(bXor, exprs, "BIT_XOR") }
+func (std *Std) SetArgs(exprs Exprs) error { return setFuncArgs(std, exprs, "STD") }
+func (stdD *StdDev) SetArgs(exprs Exprs) error { return setFuncArgs(stdD, exprs, "STDDEV") }
+func (stdP *StdPop) SetArgs(exprs Exprs) error { return setFuncArgs(stdP, exprs, "STDDEV_POP") }
+func (stdS *StdSamp) SetArgs(exprs Exprs) error { return setFuncArgs(stdS, exprs, "STDDEV_SAMP") }
+func (varP *VarPop) SetArgs(exprs Exprs) error { return setFuncArgs(varP, exprs, "VAR_POP") }
+func (varS *VarSamp) SetArgs(exprs Exprs) error { return setFuncArgs(varS, exprs, "VAR_SAMP") }
+func (variance *Variance) SetArgs(exprs Exprs) error { return setFuncArgs(variance, exprs, "VARIANCE") }
+func (av *AnyValue) SetArgs(exprs Exprs) error { return setFuncArgs(av, exprs, "ANY_VALUE") }
+
+func (count *Count) SetArgs(exprs Exprs) error {
+ count.Args = exprs
+ return nil
+}
+func (grpConcat *GroupConcatExpr) SetArgs(exprs Exprs) error {
+ grpConcat.Exprs = exprs
+ return nil
+}
+
func (sum *Sum) IsDistinct() bool { return sum.Distinct }
func (min *Min) IsDistinct() bool { return min.Distinct }
func (max *Max) IsDistinct() bool { return max.Distinct }
diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go
index 62bdded7598..b3798bbd28f 100644
--- a/go/vt/sqlparser/ast_funcs.go
+++ b/go/vt/sqlparser/ast_funcs.go
@@ -2103,6 +2103,15 @@ func ContainsAggregation(e SQLNode) bool {
return hasAggregates
}
+// setFuncArgs sets the argument of an aggregation function, checking that exactly one argument is provided
+func setFuncArgs(aggr AggrFunc, exprs Exprs, name string) error {
+ if len(exprs) != 1 {
+ return vterrors.VT03001(name)
+ }
+ aggr.SetArg(exprs[0])
+ return nil
+}
+
// GetFirstSelect gets the first select statement
func GetFirstSelect(selStmt SelectStatement) *Select {
if selStmt == nil {
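
Together, SetArg, SetArgs and the setFuncArgs helper let rewriting code swap an
aggregate's arguments through the AggrFunc interface instead of a per-type
switch. A small sketch of the intended usage, assuming the sqlparser package's
existing NewColName and String helpers; the column names are made up:

	var aggr sqlparser.AggrFunc = &sqlparser.Min{}

	// Exactly one expression is accepted for single-argument aggregates.
	if err := aggr.SetArgs(sqlparser.Exprs{sqlparser.NewColName("price")}); err != nil {
		panic(err)
	}
	fmt.Println(sqlparser.String(aggr)) // min(price)

	// Passing more than one expression makes setFuncArgs return VT03001.
	err := aggr.SetArgs(sqlparser.Exprs{sqlparser.NewColName("a"), sqlparser.NewColName("b")})
	fmt.Println(err != nil) // true
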
diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go
index 18f2ad44a7f..de1fdc868ad 100644
--- a/go/vt/sqlparser/normalizer_test.go
+++ b/go/vt/sqlparser/normalizer_test.go
@@ -388,6 +388,15 @@ func TestNormalize(t *testing.T) {
"bv2": sqltypes.Int64BindVariable(2),
"bv3": sqltypes.Int64BindVariable(3),
},
+ }, {
+ // list in on duplicate key update
+ in: "insert into t(a, b) values (1, 2) on duplicate key update b = if(values(b) in (1, 2), b, values(b))",
+ outstmt: "insert into t(a, b) values (:bv1 /* INT64 */, :bv2 /* INT64 */) on duplicate key update b = if(values(b) in ::bv3, b, values(b))",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.Int64BindVariable(1),
+ "bv2": sqltypes.Int64BindVariable(2),
+ "bv3": sqltypes.TestBindVariable([]any{1, 2}),
+ },
}}
parser := NewTestParser()
for _, tc := range testcases {
diff --git a/go/vt/vtctl/workflow/framework_test.go b/go/vt/vtctl/workflow/framework_test.go
new file mode 100644
index 00000000000..197ebf1a709
--- /dev/null
+++ b/go/vt/vtctl/workflow/framework_test.go
@@ -0,0 +1,448 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "regexp"
+ "slices"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/maps"
+ "google.golang.org/protobuf/proto"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/key"
+ "vitess.io/vitess/go/vt/mysqlctl/tmutils"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/topotools"
+ "vitess.io/vitess/go/vt/vtenv"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+
+ _flag "vitess.io/vitess/go/internal/flag"
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+)
+
+const (
+ defaultCellName = "cell"
+ startingSourceTabletUID = 100
+ startingTargetTabletUID = 200
+ tabletUIDStep = 10
+)
+
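+// testKeyspace describes a keyspace used in a test: its name and the shards it is made up of.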
+type testKeyspace struct {
+ KeyspaceName string
+ ShardNames []string
+}
+
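+// queryResult pairs an expected query with the canned result the fake tablet manager client returns for it.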
+type queryResult struct {
+ query string
+ result *querypb.QueryResult
+}
+
+func TestMain(m *testing.M) {
+ _flag.ParseFlagsForTest()
+ os.Exit(m.Run())
+}
+
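+// testEnv bundles the topo server, workflow server, and fake tablet manager client that the workflow tests operate against.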
+type testEnv struct {
+ ws *Server
+ ts *topo.Server
+ tmc *testTMClient
+ sourceKeyspace, targetKeyspace *testKeyspace
+ // Keyed first by keyspace name, then tablet UID.
+ tablets map[string]map[int]*topodatapb.Tablet
+ cell string
+}
+
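+// newTestEnv builds a test environment on top of an in-memory topo server,
+// creating one primary tablet per source and target shard. Target primaries
+// are created as non-serving when the two keyspaces describe a reshard.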
+func newTestEnv(t *testing.T, ctx context.Context, cell string, sourceKeyspace, targetKeyspace *testKeyspace) *testEnv {
+ t.Helper()
+ env := &testEnv{
+ ts: memorytopo.NewServer(ctx, cell),
+ sourceKeyspace: sourceKeyspace,
+ targetKeyspace: targetKeyspace,
+ tablets: make(map[string]map[int]*topodatapb.Tablet),
+ cell: cell,
+ }
+ venv := vtenv.NewTestEnv()
+ env.tmc = newTestTMClient(env)
+ env.ws = NewServer(venv, env.ts, env.tmc)
+
+ serving := true
+ tabletID := startingSourceTabletUID
+ for _, shardName := range sourceKeyspace.ShardNames {
+ _ = env.addTablet(t, ctx, tabletID, sourceKeyspace.KeyspaceName, shardName, topodatapb.TabletType_PRIMARY, serving)
+ tabletID += tabletUIDStep
+ }
+
+ isReshard := func() bool {
+ return sourceKeyspace.KeyspaceName == targetKeyspace.KeyspaceName &&
+ !slices.Equal(sourceKeyspace.ShardNames, targetKeyspace.ShardNames)
+ }
+
+ if isReshard() {
+ serving = false
+ }
+ tabletID = startingTargetTabletUID
+ for _, shardName := range targetKeyspace.ShardNames {
+ _ = env.addTablet(t, ctx, tabletID, targetKeyspace.KeyspaceName, shardName, topodatapb.TabletType_PRIMARY, serving)
+ tabletID += tabletUIDStep
+ }
+
+ if isReshard() {
+ initSrvKeyspace(t, env.ts, targetKeyspace.KeyspaceName, sourceKeyspace.ShardNames, targetKeyspace.ShardNames, []string{cell})
+ }
+
+ err := env.ts.RebuildSrvVSchema(ctx, nil)
+ require.NoError(t, err)
+
+ return env
+}
+
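+// initSrvKeyspace writes a SrvKeyspace with primary partitions for the source and target shards into each given cell, then validates it.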
+func initSrvKeyspace(t *testing.T, topo *topo.Server, keyspace string, sources, targets, cells []string) {
+ ctx := context.Background()
+ srvKeyspace := &topodatapb.SrvKeyspace{
+ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{},
+ }
+ getPartition := func(t *testing.T, shards []string) *topodatapb.SrvKeyspace_KeyspacePartition {
+ partition := &topodatapb.SrvKeyspace_KeyspacePartition{
+ ServedType: topodatapb.TabletType_PRIMARY,
+ ShardReferences: []*topodatapb.ShardReference{},
+ }
+ for _, shard := range shards {
+ keyRange, err := key.ParseShardingSpec(shard)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(keyRange))
+ partition.ShardReferences = append(partition.ShardReferences, &topodatapb.ShardReference{
+ Name: shard,
+ KeyRange: keyRange[0],
+ })
+ }
+ return partition
+ }
+ srvKeyspace.Partitions = append(srvKeyspace.Partitions, getPartition(t, sources))
+ srvKeyspace.Partitions = append(srvKeyspace.Partitions, getPartition(t, targets))
+ for _, cell := range cells {
+ err := topo.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace)
+ require.NoError(t, err)
+ }
+ err := topo.ValidateSrvKeyspace(ctx, keyspace, strings.Join(cells, ","))
+ require.NoError(t, err)
+}
+
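+// close deletes all tablets that were added to the environment.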
+func (env *testEnv) close() {
+ for _, k := range maps.Values(env.tablets) {
+ for _, t := range maps.Values(k) {
+ env.deleteTablet(t)
+ }
+ }
+}
+
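+// addTablet creates the tablet in the topo server and, for primaries, marks it as the (possibly non-serving) primary of its shard.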
+func (env *testEnv) addTablet(t *testing.T, ctx context.Context, id int, keyspace, shard string, tabletType topodatapb.TabletType, serving bool) *topodatapb.Tablet {
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: env.cell,
+ Uid: uint32(id),
+ },
+ Keyspace: keyspace,
+ Shard: shard,
+ KeyRange: &topodatapb.KeyRange{},
+ Type: tabletType,
+ PortMap: map[string]int32{
+ "test": int32(id),
+ },
+ }
+ if env.tablets[keyspace] == nil {
+ env.tablets[keyspace] = make(map[int]*topodatapb.Tablet)
+ }
+ env.tablets[keyspace][id] = tablet
+ err := env.ws.ts.InitTablet(ctx, tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */)
+ require.NoError(t, err)
+ if tabletType == topodatapb.TabletType_PRIMARY {
+ _, err = env.ws.ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = tablet.Alias
+ si.IsPrimaryServing = serving
+ return nil
+ })
+ require.NoError(t, err)
+ }
+ return tablet
+}
+
+// addTableRoutingRules adds routing rules from the test env's source keyspace to
+// its target keyspace for the given tablet types and tables.
+func (env *testEnv) addTableRoutingRules(t *testing.T, ctx context.Context, tabletTypes []topodatapb.TabletType, tables []string) {
+ ks := env.targetKeyspace.KeyspaceName
+ rules := make(map[string][]string, len(tables)*(len(tabletTypes)*3))
+ for _, tabletType := range tabletTypes {
+ for _, tableName := range tables {
+ toTarget := []string{ks + "." + tableName}
+ tt := strings.ToLower(tabletType.String())
+ if tabletType == topodatapb.TabletType_PRIMARY {
+ rules[tableName] = toTarget
+ rules[ks+"."+tableName] = toTarget
+ rules[env.sourceKeyspace.KeyspaceName+"."+tableName] = toTarget
+ } else {
+ rules[tableName+"@"+tt] = toTarget
+ rules[ks+"."+tableName+"@"+tt] = toTarget
+ rules[env.sourceKeyspace.KeyspaceName+"."+tableName+"@"+tt] = toTarget
+ }
+ }
+ }
+ err := topotools.SaveRoutingRules(ctx, env.ts, rules)
+ require.NoError(t, err)
+ err = env.ts.RebuildSrvVSchema(ctx, nil)
+ require.NoError(t, err)
+}
+
+func (env *testEnv) deleteTablet(tablet *topodatapb.Tablet) {
+ _ = env.ts.DeleteTablet(context.Background(), tablet.Alias)
+ delete(env.tablets[tablet.Keyspace], int(tablet.Alias.Uid))
+}
+
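+// testTMClient is a fake tabletmanager client: it hands out canned query results and verifies incoming requests against registered expectations.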
+type testTMClient struct {
+ tmclient.TabletManagerClient
+ schema map[string]*tabletmanagerdatapb.SchemaDefinition
+
+ mu sync.Mutex
+ vrQueries map[int][]*queryResult
+ createVReplicationWorkflowRequests map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest
+ readVReplicationWorkflowRequests map[uint32]*tabletmanagerdatapb.ReadVReplicationWorkflowRequest
+
+ env *testEnv // For access to the env config from tmc methods.
+ reverse atomic.Bool // Are we reversing traffic?
+}
+
+func newTestTMClient(env *testEnv) *testTMClient {
+ return &testTMClient{
+ schema: make(map[string]*tabletmanagerdatapb.SchemaDefinition),
+ vrQueries: make(map[int][]*queryResult),
+ createVReplicationWorkflowRequests: make(map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest),
+ readVReplicationWorkflowRequests: make(map[uint32]*tabletmanagerdatapb.ReadVReplicationWorkflowRequest),
+ env: env,
+ }
+}
+
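+// CreateVReplicationWorkflow checks the request against any expectation registered for the tablet and returns a single-row result.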
+func (tmc *testTMClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+
+ if expect := tmc.createVReplicationWorkflowRequests[tablet.Alias.Uid]; expect != nil {
+ if !proto.Equal(expect, req) {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected CreateVReplicationWorkflow request: got %+v, want %+v", req, expect)
+ }
+ }
+ res := sqltypes.MakeTestResult(sqltypes.MakeTestFields("rowsaffected", "int64"), "1")
+ return &tabletmanagerdatapb.CreateVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil
+}
+
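+// ReadVReplicationWorkflow synthesizes a response with one stream per shard of the binlog-source keyspace, with rules derived from the registered schema.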
+func (tmc *testTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+
+ if expect := tmc.readVReplicationWorkflowRequests[tablet.Alias.Uid]; expect != nil {
+ if !proto.Equal(expect, req) {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected ReadVReplicationWorkflow request: got %+v, want %+v", req, expect)
+ }
+ }
+ workflowType := binlogdatapb.VReplicationWorkflowType_MoveTables
+ if strings.Contains(req.Workflow, "lookup") {
+ workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex
+ }
+ res := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+ Workflow: req.Workflow,
+ WorkflowType: workflowType,
+ Streams: make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, 0, 2),
+ }
+ rules := make([]*binlogdatapb.Rule, len(tmc.schema))
+ for i, table := range maps.Keys(tmc.schema) {
+ rules[i] = &binlogdatapb.Rule{
+ Match: table,
+ Filter: fmt.Sprintf("select * from %s", table),
+ }
+ }
+ blsKs := tmc.env.sourceKeyspace
+ if tmc.reverse.Load() && tablet.Keyspace == tmc.env.sourceKeyspace.KeyspaceName {
+ blsKs = tmc.env.targetKeyspace
+ }
+ for i, shard := range blsKs.ShardNames {
+ stream := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+ Id: int32(i + 1),
+ Bls: &binlogdatapb.BinlogSource{
+ Keyspace: blsKs.KeyspaceName,
+ Shard: shard,
+ Tables: maps.Keys(tmc.schema),
+ Filter: &binlogdatapb.Filter{
+ Rules: rules,
+ },
+ },
+ }
+ res.Streams = append(res.Streams, stream)
+ }
+
+ return res, nil
+}
+
+func (tmc *testTMClient) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (response *tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, err error) {
+ return &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{
+ Result: &querypb.QueryResult{
+ RowsAffected: 1,
+ },
+ }, nil
+}
+
+func (tmc *testTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+
+ schemaDefn := &tabletmanagerdatapb.SchemaDefinition{}
+ for _, table := range req.Tables {
+ if table == "/.*/" {
+ // Special case of all tables in keyspace.
+ for key, tableDefn := range tmc.schema {
+ if strings.HasPrefix(key, tablet.Keyspace+".") {
+ schemaDefn.TableDefinitions = append(schemaDefn.TableDefinitions, tableDefn.TableDefinitions...)
+ }
+ }
+ break
+ }
+
+ key := tablet.Keyspace + "." + table
+ tableDefn := tmc.schema[key]
+ if tableDefn == nil {
+ continue
+ }
+ schemaDefn.TableDefinitions = append(schemaDefn.TableDefinitions, tableDefn.TableDefinitions...)
+ }
+ return schemaDefn, nil
+}
+
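+// expectVRQuery registers a query the given tablet is expected to receive, along with the result to return for it.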
+func (tmc *testTMClient) expectVRQuery(tabletID int, query string, result *sqltypes.Result) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+
+ tmc.vrQueries[tabletID] = append(tmc.vrQueries[tabletID], &queryResult{
+ query: query,
+ result: sqltypes.ResultToProto3(result),
+ })
+}
+
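+// expectVRQueryResultOnKeyspaceTablets registers the same expected query and result on every tablet of the keyspace.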
+func (tmc *testTMClient) expectVRQueryResultOnKeyspaceTablets(keyspace string, queryResult *queryResult) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+
+ for uid := range tmc.env.tablets[keyspace] {
+ tmc.vrQueries[uid] = append(tmc.vrQueries[uid], queryResult)
+ }
+}
+
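+// expectCreateVReplicationWorkflowRequest registers the CreateVReplicationWorkflow request the given tablet is expected to receive.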
+func (tmc *testTMClient) expectCreateVReplicationWorkflowRequest(tabletID uint32, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+
+ tmc.createVReplicationWorkflowRequests[tabletID] = req
+}
+
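+// VReplicationExec pops the next expected query registered for the tablet and
+// returns its canned result. Expectations starting with '/' are treated as
+// regular expressions; all others must match the query exactly.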
+func (tmc *testTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+
+ qrs := tmc.vrQueries[int(tablet.Alias.Uid)]
+ if len(qrs) == 0 {
+ return nil, fmt.Errorf("tablet %v does not expect any more queries: %s", tablet, query)
+ }
+ matched := false
+ if qrs[0].query[0] == '/' {
+ matched = regexp.MustCompile(qrs[0].query[1:]).MatchString(query)
+ } else {
+ matched = query == qrs[0].query
+ }
+ if !matched {
+ return nil, fmt.Errorf("tablet %v:\nunexpected query\n%s\nwant:\n%s", tablet, query, qrs[0].query)
+ }
+ tmc.vrQueries[int(tablet.Alias.Uid)] = qrs[1:]
+ return qrs[0].result, nil
+}
+
+func (tmc *testTMClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) {
+ // Reuse VReplicationExec.
+ return tmc.VReplicationExec(ctx, tablet, string(req.Query))
+}
+
+func (tmc *testTMClient) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) {
+ return nil, nil
+}
+
+// Note: ONLY breaks up change.SQL into individual statements and executes them. Does NOT fully implement ApplySchema.
+func (tmc *testTMClient) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) {
+ stmts := strings.Split(change.SQL, ";")
+
+ for _, stmt := range stmts {
+ _, err := tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
+ Query: []byte(stmt),
+ MaxRows: 0,
+ ReloadSchema: true,
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (tmc *testTMClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) {
+ return &tabletmanagerdatapb.VDiffResponse{
+ Id: 1,
+ VdiffUuid: req.VdiffUuid,
+ Output: &querypb.QueryResult{
+ RowsAffected: 1,
+ },
+ }, nil
+}
+
+func (tmc *testTMClient) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) {
+ return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{
+ Result: &querypb.QueryResult{
+ RowsAffected: 1,
+ },
+ }, nil
+}
+
+func (tmc *testTMClient) PrimaryPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) {
+ return position, nil
+}
+
+func (tmc *testTMClient) WaitForPosition(ctx context.Context, tablet *topodatapb.Tablet, pos string) error {
+ return nil
+}
+
+func (tmc *testTMClient) VReplicationWaitForPos(ctx context.Context, tablet *topodatapb.Tablet, id int32, pos string) error {
+ return nil
+}
diff --git a/go/vt/vtctl/workflow/materializer_env_test.go b/go/vt/vtctl/workflow/materializer_env_test.go
index 452c5755a10..c4d1e938e46 100644
--- a/go/vt/vtctl/workflow/materializer_env_test.go
+++ b/go/vt/vtctl/workflow/materializer_env_test.go
@@ -19,7 +19,6 @@ package workflow
import (
"context"
"fmt"
- "os"
"regexp"
"strconv"
"strings"
@@ -36,7 +35,6 @@ import (
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tmclient"
- _flag "vitess.io/vitess/go/internal/flag"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
querypb "vitess.io/vitess/go/vt/proto/query"
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
@@ -45,11 +43,6 @@ import (
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
-type queryResult struct {
- query string
- result *querypb.QueryResult
-}
-
type testMaterializerEnv struct {
ws *Server
ms *vtctldatapb.MaterializeSettings
@@ -66,11 +59,6 @@ type testMaterializerEnv struct {
//----------------------------------------------
// testMaterializerEnv
-func TestMain(m *testing.M) {
- _flag.ParseFlagsForTest()
- os.Exit(m.Run())
-}
-
func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.MaterializeSettings, sources, targets []string) *testMaterializerEnv {
t.Helper()
env := &testMaterializerEnv{
diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go
index 82cc07fdf7f..e403ab8b4d5 100644
--- a/go/vt/vtctl/workflow/materializer_test.go
+++ b/go/vt/vtctl/workflow/materializer_test.go
@@ -45,6 +45,7 @@ import (
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
)
+const position = "9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97"
const getWorkflowQuery = "select id from _vt.vreplication where db_name='vt_targetks' and workflow='workflow'"
const mzUpdateQuery = "update _vt.vreplication set state='Running' where db_name='vt_targetks' and workflow='workflow'"
const mzSelectFrozenQuery = "select 1 from _vt.vreplication where db_name='vt_targetks' and message='FROZEN' and workflow_sub_type != 1"
@@ -53,7 +54,6 @@ const mzGetWorkflowStatusQuery = "select id, workflow, source, pos, stop_pos, ma
const mzGetCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1"
const mzGetLatestCopyState = "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)"
const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys\) values `
-const eol = "$"
var (
defaultOnDDL = binlogdatapb.OnDDLAction_IGNORE.String()
diff --git a/go/vt/vtctl/workflow/resharder.go b/go/vt/vtctl/workflow/resharder.go
index e36b546c1d2..9ca91d00d00 100644
--- a/go/vt/vtctl/workflow/resharder.go
+++ b/go/vt/vtctl/workflow/resharder.go
@@ -97,6 +97,9 @@ func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string,
if err != nil {
return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard)
}
+ if si.PrimaryAlias == nil {
+ return nil, fmt.Errorf("target shard %v has no primary tablet", shard)
+ }
if si.IsPrimaryServing {
return nil, fmt.Errorf("target shard %v is in serving state", shard)
}
diff --git a/go/vt/vtctl/workflow/resharder_test.go b/go/vt/vtctl/workflow/resharder_test.go
new file mode 100644
index 00000000000..f2f8293d536
--- /dev/null
+++ b/go/vt/vtctl/workflow/resharder_test.go
@@ -0,0 +1,232 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+const eol = "$"
+
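+// TestReshardCreate exercises ReshardCreate: the basic happy path and the failure when a target shard has no primary tablet.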
+func TestReshardCreate(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+
+ workflowName := "wf1"
+ tableName := "t1"
+ sourceKeyspaceName := "targetks"
+ targetKeyspaceName := "targetks"
+ tabletTypes := []topodatapb.TabletType{
+ topodatapb.TabletType_PRIMARY,
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_RDONLY,
+ }
+ tabletTypesStr := topoproto.MakeStringTypeCSV(tabletTypes)
+ schema := map[string]*tabletmanagerdatapb.SchemaDefinition{
+ tableName: {
+ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
+ {
+ Name: tableName,
+ Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName),
+ },
+ },
+ },
+ }
+
+ var binlogSource = &binlogdatapb.BinlogSource{
+ Keyspace: sourceKeyspaceName,
+ Shard: "0",
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: "select * from t1",
+ }},
+ },
+ }
+
+ testcases := []struct {
+ name string
+ sourceKeyspace, targetKeyspace *testKeyspace
+ preFunc func(env *testEnv)
+ want *vtctldatapb.WorkflowStatusResponse
+ wantErr string
+ }{
+ {
+ name: "basic",
+ sourceKeyspace: &testKeyspace{
+ KeyspaceName: sourceKeyspaceName,
+ ShardNames: []string{"0"},
+ },
+ targetKeyspace: &testKeyspace{
+ KeyspaceName: targetKeyspaceName,
+ ShardNames: []string{"-80", "80-"},
+ },
+ want: &vtctldatapb.WorkflowStatusResponse{
+ ShardStreams: map[string]*vtctldatapb.WorkflowStatusResponse_ShardStreams{
+ "targetks/-80": {
+ Streams: []*vtctldatapb.WorkflowStatusResponse_ShardStreamState{
+ {
+ Id: 1,
+ Tablet: &topodatapb.TabletAlias{Cell: defaultCellName, Uid: startingTargetTabletUID},
+ SourceShard: "targetks/0", Position: position, Status: "Running", Info: "VStream Lag: 0s",
+ },
+ },
+ },
+ "targetks/80-": {
+ Streams: []*vtctldatapb.WorkflowStatusResponse_ShardStreamState{
+ {
+ Id: 1,
+ Tablet: &topodatapb.TabletAlias{Cell: defaultCellName, Uid: startingTargetTabletUID + tabletUIDStep},
+ SourceShard: "targetks/0", Position: position, Status: "Running", Info: "VStream Lag: 0s",
+ },
+ },
+ },
+ },
+ TrafficState: "Reads Not Switched. Writes Not Switched",
+ },
+ },
+ {
+ name: "no primary",
+ sourceKeyspace: &testKeyspace{
+ KeyspaceName: sourceKeyspaceName,
+ ShardNames: []string{"0"},
+ },
+ targetKeyspace: &testKeyspace{
+ KeyspaceName: targetKeyspaceName,
+ ShardNames: []string{"-80", "80-"},
+ },
+ preFunc: func(env *testEnv) {
+ _, err := env.ts.UpdateShardFields(ctx, targetKeyspaceName, "-80", func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = nil
+ return nil
+ })
+ require.NoError(t, err)
+ },
+ wantErr: "buildResharder: target shard -80 has no primary tablet",
+ },
+ }
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ require.NotNil(t, tc.sourceKeyspace)
+ require.NotNil(t, tc.targetKeyspace)
+
+ env := newTestEnv(t, ctx, defaultCellName, tc.sourceKeyspace, tc.targetKeyspace)
+ defer env.close()
+ env.tmc.schema = schema
+
+ req := &vtctldatapb.ReshardCreateRequest{
+ Keyspace: targetKeyspaceName,
+ Workflow: workflowName,
+ TabletTypes: tabletTypes,
+ SourceShards: tc.sourceKeyspace.ShardNames,
+ TargetShards: tc.targetKeyspace.ShardNames,
+ Cells: []string{env.cell},
+ }
+
+ for i := range tc.sourceKeyspace.ShardNames {
+ tabletUID := startingSourceTabletUID + (tabletUIDStep * i)
+ env.tmc.expectVRQuery(
+ tabletUID,
+ fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name='vt_%s' and message != 'FROZEN'", targetKeyspaceName),
+ &sqltypes.Result{},
+ )
+ env.tmc.expectVRQuery(
+ tabletUID,
+ "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1",
+ &sqltypes.Result{},
+ )
+ env.tmc.expectVRQuery(
+ tabletUID,
+ fmt.Sprintf("select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type, time_heartbeat, defer_secondary_keys, component_throttled, time_throttled, rows_copied, tablet_types, cell from _vt.vreplication where workflow = '%s' and db_name = 'vt_%s'",
+ workflowName, targetKeyspaceName),
+ &sqltypes.Result{},
+ )
+ env.tmc.expectVRQuery(
+ tabletUID,
+ "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)",
+ &sqltypes.Result{},
+ )
+ }
+
+ for i, target := range tc.targetKeyspace.ShardNames {
+ tabletUID := startingTargetTabletUID + (tabletUIDStep * i)
+ env.tmc.expectVRQuery(
+ tabletUID,
+ fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s'", targetKeyspaceName),
+ &sqltypes.Result{},
+ )
+ env.tmc.expectVRQuery(
+ tabletUID,
+ insertPrefix+
+ `\('`+workflowName+`', 'keyspace:\\"`+targetKeyspaceName+`\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"`+target+`\\"}}', '', [0-9]*, [0-9]*, '`+
+ env.cell+`', '`+tabletTypesStr+`', [0-9]*, 0, 'Stopped', 'vt_`+targetKeyspaceName+`', 4, 0, false\)`+eol,
+ &sqltypes.Result{},
+ )
+ env.tmc.expectVRQuery(
+ tabletUID,
+ "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1",
+ &sqltypes.Result{},
+ )
+ env.tmc.expectVRQuery(
+ tabletUID,
+ fmt.Sprintf("select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type, time_heartbeat, defer_secondary_keys, component_throttled, time_throttled, rows_copied, tablet_types, cell from _vt.vreplication where workflow = '%s' and db_name = 'vt_%s'",
+ workflowName, targetKeyspaceName),
+ sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "id|workflow|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type|time_heartbeat|defer_secondary_keys|component_throttled|time_throttled|rows_copied|tablet_types|cell",
+ "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|int64|int64|varchar|varchar",
+ ),
+ fmt.Sprintf("1|%s|%s|MySQL56/%s|NULL|0|Running|vt_%s|1686577659|0|||1|0|0|0||0|10||", workflowName, binlogSource, position, sourceKeyspaceName),
+ ),
+ )
+ env.tmc.expectVRQuery(
+ tabletUID,
+ "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)",
+ &sqltypes.Result{},
+ )
+ }
+
+ if tc.preFunc != nil {
+ tc.preFunc(env)
+ }
+
+ res, err := env.ws.ReshardCreate(ctx, req)
+ if tc.wantErr != "" {
+ require.EqualError(t, err, tc.wantErr)
+ return
+ }
+ require.NoError(t, err)
+ if tc.want != nil {
+ require.Equal(t, tc.want, res)
+ }
+ })
+ }
+}
diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go
index 635d47d8bf2..c9a512e4bf5 100644
--- a/go/vt/vtctl/workflow/server.go
+++ b/go/vt/vtctl/workflow/server.go
@@ -1669,7 +1669,7 @@ func (s *Server) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCrea
if err := s.ts.ValidateSrvKeyspace(ctx, keyspace, strings.Join(cells, ",")); err != nil {
err2 := vterrors.Wrapf(err, "SrvKeyspace for keyspace %s is corrupt for cell(s) %s", keyspace, cells)
- log.Errorf("%w", err2)
+ log.Errorf("%v", err2)
return nil, err
}
tabletTypesStr := topoproto.MakeStringTypeCSV(req.TabletTypes)
@@ -1702,6 +1702,7 @@ func (s *Server) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCrea
return s.WorkflowStatus(ctx, &vtctldatapb.WorkflowStatusRequest{
Keyspace: keyspace,
Workflow: req.Workflow,
+ Shards: req.TargetShards,
})
}
diff --git a/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt
index 7ae20ca1a7f..43ab9253151 100644
--- a/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt
+++ b/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt
@@ -99,10 +99,10 @@ select 1, "hello", 3.14, null from user limit 10 /* select constant sql values *
----------------------------------------------------------------------
select * from (select id from user) s /* scatter paren select */
-1 ks_sharded/-40: select s.id from (select id from `user`) as s limit 10001 /* scatter paren select */
-1 ks_sharded/40-80: select s.id from (select id from `user`) as s limit 10001 /* scatter paren select */
-1 ks_sharded/80-c0: select s.id from (select id from `user`) as s limit 10001 /* scatter paren select */
-1 ks_sharded/c0-: select s.id from (select id from `user`) as s limit 10001 /* scatter paren select */
+1 ks_sharded/-40: select id from (select id from `user`) as s limit 10001 /* scatter paren select */
+1 ks_sharded/40-80: select id from (select id from `user`) as s limit 10001 /* scatter paren select */
+1 ks_sharded/80-c0: select id from (select id from `user`) as s limit 10001 /* scatter paren select */
+1 ks_sharded/c0-: select id from (select id from `user`) as s limit 10001 /* scatter paren select */
----------------------------------------------------------------------
select name from user where id = (select id from t1) /* non-correlated subquery as value */
@@ -114,16 +114,16 @@ select name from user where id = (select id from t1) /* non-correlated subquery
select name from user where id in (select id from t1) /* non-correlated subquery in IN clause */
1 ks_unsharded/-: select id from t1 limit 10001 /* non-correlated subquery in IN clause */
-2 ks_sharded/-40: select `name` from `user` where 1 = 1 and id in (1) limit 10001 /* non-correlated subquery in IN clause */
+2 ks_sharded/-40: select `name` from `user` where 1 and id in (1) limit 10001 /* non-correlated subquery in IN clause */
----------------------------------------------------------------------
select name from user where id not in (select id from t1) /* non-correlated subquery in NOT IN clause */
1 ks_unsharded/-: select id from t1 limit 10001 /* non-correlated subquery in NOT IN clause */
-2 ks_sharded/-40: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
-2 ks_sharded/40-80: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
-2 ks_sharded/80-c0: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
-2 ks_sharded/c0-: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
+2 ks_sharded/-40: select `name` from `user` where not 1 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
+2 ks_sharded/40-80: select `name` from `user` where not 1 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
+2 ks_sharded/80-c0: select `name` from `user` where not 1 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
+2 ks_sharded/c0-: select `name` from `user` where not 1 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */
----------------------------------------------------------------------
select name from user where exists (select id from t1) /* non-correlated subquery as EXISTS */
@@ -137,10 +137,10 @@ select name from user where exists (select id from t1) /* non-correlated subquer
----------------------------------------------------------------------
select * from name_info order by info /* select * and order by varchar column */
-1 ks_sharded/-40: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */
-1 ks_sharded/40-80: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */
-1 ks_sharded/80-c0: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */
-1 ks_sharded/c0-: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */
+1 ks_sharded/-40: select `name`, info, weight_string(info) from name_info order by name_info.info asc limit 10001 /* select * and order by varchar column */
+1 ks_sharded/40-80: select `name`, info, weight_string(info) from name_info order by name_info.info asc limit 10001 /* select * and order by varchar column */
+1 ks_sharded/80-c0: select `name`, info, weight_string(info) from name_info order by name_info.info asc limit 10001 /* select * and order by varchar column */
+1 ks_sharded/c0-: select `name`, info, weight_string(info) from name_info order by name_info.info asc limit 10001 /* select * and order by varchar column */
----------------------------------------------------------------------
select distinct(name) from user where id = 1 /* select distinct */
@@ -207,3 +207,9 @@ SELECT id FROM orders WHERE id IN (1, "1", 1)
2 ks_sharded/40-80: select id from orders where id in (1, '1', 1) limit 10001
----------------------------------------------------------------------
+(SELECT user.id, user.name FROM user WHERE user.id = 1) UNION (SELECT user.id, user.name FROM user WHERE user.id = 3)
+
+1 ks_sharded/-40: select distinct `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where `user`.id = 1 limit 10001 /* INT64 */
+1 ks_sharded/40-80: select distinct `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where `user`.id = 3 limit 10001 /* INT64 */
+
+----------------------------------------------------------------------
diff --git a/go/vt/vtexplain/testdata/selectsharded-queries.sql b/go/vt/vtexplain/testdata/selectsharded-queries.sql
index ad003d1c457..067f53df4e6 100644
--- a/go/vt/vtexplain/testdata/selectsharded-queries.sql
+++ b/go/vt/vtexplain/testdata/selectsharded-queries.sql
@@ -38,4 +38,6 @@ select id from user where not id in (select col from music where music.user_id =
SELECT user.id, user.name, name_info.info FROM user INNER JOIN music ON (user.id = music.user_id) LEFT OUTER JOIN name_info ON (user.name = name_info.name);
-SELECT id FROM orders WHERE id IN (1, "1", 1)
+SELECT id FROM orders WHERE id IN (1, "1", 1);
+
+(SELECT user.id, user.name FROM user WHERE user.id = 1) UNION (SELECT user.id, user.name FROM user WHERE user.id = 3);
diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go
index e7a6f4bdfc8..2a1349e4fb3 100644
--- a/go/vt/vtexplain/vtexplain_test.go
+++ b/go/vt/vtexplain/vtexplain_test.go
@@ -34,6 +34,7 @@ import (
"vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/key"
+ querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv/tabletenvtest"
@@ -149,6 +150,28 @@ func TestExplain(t *testing.T) {
}
tests := []test{
{"unsharded", defaultTestOpts()},
+ {"selectsharded", defaultTestOpts()},
+ {"insertsharded", defaultTestOpts()},
+ {"updatesharded", defaultTestOpts()},
+ {"deletesharded", defaultTestOpts()},
+ {"comments", defaultTestOpts()},
+ {"options", &Options{
+ ReplicationMode: "STATEMENT",
+ NumShards: 4,
+ Normalize: false,
+ }},
+ {"target", &Options{
+ ReplicationMode: "ROW",
+ NumShards: 4,
+ Normalize: false,
+ Target: "ks_sharded/40-80",
+ }},
+ {"gen4", &Options{
+ ReplicationMode: "ROW",
+ NumShards: 4,
+ Normalize: true,
+ PlannerVersion: querypb.ExecuteOptions_Gen4,
+ }},
}
for _, tst := range tests {
diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go
index b573fe29774..cfddd222303 100644
--- a/go/vt/vtexplain/vtexplain_vttablet.go
+++ b/go/vt/vtexplain/vtexplain_vttablet.go
@@ -428,7 +428,9 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options, collatio
tEnv.addResult(query, result)
}
- showTableRows := make([][]sqltypes.Value, 0, 4)
+ showTableRows := make([][]sqltypes.Value, 0, len(ddls))
+ showTableWithSizesRows := make([][]sqltypes.Value, 0, len(ddls))
+
for _, ddl := range ddls {
table := ddl.GetTable().Name.String()
options := ""
@@ -441,14 +443,21 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options, collatio
}
}
showTableRows = append(showTableRows, mysql.BaseShowTablesRow(table, false, options))
+ showTableWithSizesRows = append(showTableWithSizesRows, mysql.BaseShowTablesWithSizesRow(table, true, options))
}
- tEnv.addResult(mysql.TablesWithSize57, &sqltypes.Result{
+
+ tEnv.addResult(mysql.BaseShowTables, &sqltypes.Result{
Fields: mysql.BaseShowTablesFields,
Rows: showTableRows,
})
+
+ tEnv.addResult(mysql.TablesWithSize57, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
+ Rows: showTableWithSizesRows,
+ })
tEnv.addResult(mysql.TablesWithSize80, &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
- Rows: showTableRows,
+ Fields: mysql.BaseShowTablesWithSizesFields,
+ Rows: showTableWithSizesRows,
})
indexRows := make([][]sqltypes.Value, 0, 4)
@@ -854,9 +863,15 @@ func inferColTypeFromExpr(node sqlparser.Expr, tableColumnMap map[sqlparser.Iden
colTypes = append(colTypes, colType)
}
case sqlparser.Callable:
- // As a shortcut, functions are integral types
- colNames = append(colNames, sqlparser.String(node))
- colTypes = append(colTypes, querypb.Type_INT32)
+ switch node := node.(type) {
+ case *sqlparser.WeightStringFuncExpr:
+ colNames = append(colNames, sqlparser.String(node))
+ colTypes = append(colTypes, querypb.Type_BINARY)
+ default:
+ // As a shortcut, treat all other functions as integral types
+ colNames = append(colNames, sqlparser.String(node))
+ colTypes = append(colTypes, querypb.Type_INT32)
+ }
case *sqlparser.Literal:
colNames = append(colNames, sqlparser.String(node))
switch node.Type {
diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go
index 332ccc92098..be0bb889083 100644
--- a/go/vt/vtgate/engine/insert.go
+++ b/go/vt/vtgate/engine/insert.go
@@ -265,13 +265,20 @@ func (ins *Insert) getInsertShardedQueries(
index, _ := strconv.ParseInt(string(indexValue.Value), 0, 64)
if keyspaceIDs[index] != nil {
walkFunc := func(node sqlparser.SQLNode) (kontinue bool, err error) {
- if arg, ok := node.(*sqlparser.Argument); ok {
- bv, exists := bindVars[arg.Name]
- if !exists {
- return false, vterrors.VT03026(arg.Name)
- }
- shardBindVars[arg.Name] = bv
+ var arg string
+ switch argType := node.(type) {
+ case *sqlparser.Argument:
+ arg = argType.Name
+ case sqlparser.ListArg:
+ arg = string(argType)
+ default:
+ return true, nil
}
+ bv, exists := bindVars[arg]
+ if !exists {
+ return false, vterrors.VT03026(arg)
+ }
+ shardBindVars[arg] = bv
return true, nil
}
mids = append(mids, sqlparser.String(ins.Mid[index]))
diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go
index 762c68a83dc..af6eb4f51b2 100644
--- a/go/vt/vtgate/engine/insert_test.go
+++ b/go/vt/vtgate/engine/insert_test.go
@@ -356,13 +356,22 @@ func TestInsertShardWithONDuplicateKey(t *testing.T) {
{&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}},
},
sqlparser.OnDup{
- &sqlparser.UpdateExpr{Name: sqlparser.NewColName("suffix"), Expr: &sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}},
- },
+ &sqlparser.UpdateExpr{Name: sqlparser.NewColName("suffix1"), Expr: &sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}},
+ &sqlparser.UpdateExpr{Name: sqlparser.NewColName("suffix2"), Expr: &sqlparser.FuncExpr{
+ Name: sqlparser.NewIdentifierCI("if"),
+ Exprs: sqlparser.SelectExprs{
+ sqlparser.NewAliasedExpr(sqlparser.NewComparisonExpr(sqlparser.InOp, &sqlparser.ValuesFuncExpr{Name: sqlparser.NewColName("col")}, sqlparser.ListArg("_id_1"), nil), ""),
+ sqlparser.NewAliasedExpr(sqlparser.NewColName("col"), ""),
+ sqlparser.NewAliasedExpr(&sqlparser.ValuesFuncExpr{Name: sqlparser.NewColName("col")}, ""),
+ },
+ }}},
)
vc := newDMLTestVCursor("-20", "20-")
vc.shardForKsid = []string{"20-", "-20", "20-"}
- _, err := ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false)
+ _, err := ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{
+ "_id_1": sqltypes.TestBindVariable([]int{1, 2}),
+ }, false)
if err != nil {
t.Fatal(err)
}
@@ -371,7 +380,10 @@ func TestInsertShardWithONDuplicateKey(t *testing.T) {
`ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`,
// Row 2 will go to -20, rows 1 & 3 will go to 20-
`ExecuteMultiShard ` +
- `sharded.20-: prefix(:_id_0 /* INT64 */) on duplicate key update suffix = :_id_0 /* INT64 */ {_id_0: type:INT64 value:"1"} ` +
+ `sharded.20-: prefix(:_id_0 /* INT64 */) on duplicate key update ` +
+ `suffix1 = :_id_0 /* INT64 */, suffix2 = if(values(col) in ::_id_1, col, values(col)) ` +
+ `{_id_0: type:INT64 value:"1" ` +
+ `_id_1: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} ` +
`true true`,
})
diff --git a/go/vt/vtgate/evalengine/compiler.go b/go/vt/vtgate/evalengine/compiler.go
index 3a9b204596f..387dbe44cc2 100644
--- a/go/vt/vtgate/evalengine/compiler.go
+++ b/go/vt/vtgate/evalengine/compiler.go
@@ -106,6 +106,10 @@ func (t *Type) Nullable() bool {
return true // nullable by default for unknown types
}
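+// SetNullability overrides the nullability of the type.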
+func (t *Type) SetNullability(n bool) {
+ t.nullable = n
+}
+
func (t *Type) Valid() bool {
return t.init
}
diff --git a/go/vt/vtgate/planbuilder/fuzz.go b/go/vt/vtgate/planbuilder/fuzz.go
index 6b8b37ba43f..79dcca01a53 100644
--- a/go/vt/vtgate/planbuilder/fuzz.go
+++ b/go/vt/vtgate/planbuilder/fuzz.go
@@ -20,12 +20,12 @@ import (
"sync"
"testing"
+ fuzz "github.com/AdaLogics/go-fuzz-headers"
+
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/sqltypes"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
"vitess.io/vitess/go/vt/vtgate/vindexes"
-
- fuzz "github.com/AdaLogics/go-fuzz-headers"
)
var initter sync.Once
diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go
index cadfba91772..486cadf2fe8 100644
--- a/go/vt/vtgate/planbuilder/operator_transformers.go
+++ b/go/vt/vtgate/planbuilder/operator_transformers.go
@@ -291,7 +291,7 @@ func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggrega
oa.aggregates = append(oa.aggregates, aggrParam)
}
for _, groupBy := range op.Grouping {
- typ, _ := ctx.SemTable.TypeForExpr(groupBy.Inner)
+ typ, _ := ctx.TypeForExpr(groupBy.Inner)
oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{
KeyCol: groupBy.ColOffset,
WeightStringCol: groupBy.WSOffset,
@@ -332,7 +332,7 @@ func createMemorySort(ctx *plancontext.PlanningContext, src logicalPlan, orderin
}
for idx, order := range ordering.Order {
- typ, _ := ctx.SemTable.TypeForExpr(order.SimplifiedExpr)
+ typ, _ := ctx.TypeForExpr(order.SimplifiedExpr)
ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, evalengine.OrderByParams{
Col: ordering.Offset[idx],
WeightStringCol: ordering.WOffset[idx],
@@ -389,7 +389,7 @@ func getEvalEngingeExpr(ctx *plancontext.PlanningContext, pe *operators.ProjExpr
case *operators.EvalEngine:
return e.EExpr, nil
case operators.Offset:
- typ, _ := ctx.SemTable.TypeForExpr(pe.EvalExpr)
+ typ, _ := ctx.TypeForExpr(pe.EvalExpr)
return evalengine.NewColumn(int(e), typ, pe.EvalExpr), nil
default:
return nil, vterrors.VT13001("project not planned for: %s", pe.String())
@@ -560,7 +560,7 @@ func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route
eroute, err := routeToEngineRoute(ctx, op, hints)
for _, order := range op.Ordering {
- typ, _ := ctx.SemTable.TypeForExpr(order.AST)
+ typ, _ := ctx.TypeForExpr(order.AST)
eroute.OrderBy = append(eroute.OrderBy, evalengine.OrderByParams{
Col: order.Offset,
WeightStringCol: order.WOffset,
@@ -877,11 +877,11 @@ func transformHashJoin(ctx *plancontext.PlanningContext, op *operators.HashJoin)
var missingTypes []string
- ltyp, found := ctx.SemTable.TypeForExpr(op.JoinComparisons[0].LHS)
+ ltyp, found := ctx.TypeForExpr(op.JoinComparisons[0].LHS)
if !found {
missingTypes = append(missingTypes, sqlparser.String(op.JoinComparisons[0].LHS))
}
- rtyp, found := ctx.SemTable.TypeForExpr(op.JoinComparisons[0].RHS)
+ rtyp, found := ctx.TypeForExpr(op.JoinComparisons[0].RHS)
if !found {
missingTypes = append(missingTypes, sqlparser.String(op.JoinComparisons[0].RHS))
}
diff --git a/go/vt/vtgate/planbuilder/operators/SQL_builder.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go
index 65ff485c469..c58d6d2b002 100644
--- a/go/vt/vtgate/planbuilder/operators/SQL_builder.go
+++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go
@@ -220,7 +220,9 @@ func (qb *queryBuilder) joinInnerWith(other *queryBuilder, onCondition sqlparser
sel.Where = &sqlparser.Where{Type: sqlparser.WhereClause, Expr: predicate}
}
- qb.addPredicate(onCondition)
+ for _, pred := range sqlparser.SplitAndExpression(nil, onCondition) {
+ qb.addPredicate(pred)
+ }
}
func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser.Expr) {
@@ -463,6 +465,13 @@ func buildAggregation(op *Aggregator, qb *queryBuilder) {
qb.addGroupBy(weightStringFor(simplified))
}
}
+ if op.DT != nil {
+ sel := qb.asSelectStatement()
+ qb.stmt = nil
+ qb.addTableExpr(op.DT.Alias, op.DT.Alias, TableID(op), &sqlparser.DerivedTable{
+ Select: sel,
+ }, nil, op.DT.Columns)
+ }
}
func buildOrdering(op *Ordering, qb *queryBuilder) {
diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go
index a0963929eaa..25ab5f98b60 100644
--- a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go
+++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go
@@ -89,23 +89,21 @@ func reachedPhase(ctx *plancontext.PlanningContext, p Phase) bool {
// Any columns that are needed to evaluate the subquery needs to be added as
// grouping columns to the aggregation being pushed down, and then after the
// subquery evaluation we are free to reassemble the total aggregation values.
-// This is very similar to how we push aggregation through an shouldRun-join.
+// This is very similar to how we push aggregation through an apply-join.
func pushAggregationThroughSubquery(
ctx *plancontext.PlanningContext,
rootAggr *Aggregator,
src *SubQueryContainer,
) (Operator, *ApplyResult) {
- pushedAggr := rootAggr.Clone([]Operator{src.Outer}).(*Aggregator)
- pushedAggr.Original = false
- pushedAggr.Pushed = false
-
+ pushedAggr := rootAggr.SplitAggregatorBelowOperators(ctx, []Operator{src.Outer})
for _, subQuery := range src.Inner {
lhsCols := subQuery.OuterExpressionsNeeded(ctx, src.Outer)
for _, colName := range lhsCols {
- idx := slices.IndexFunc(pushedAggr.Columns, func(ae *sqlparser.AliasedExpr) bool {
+ findColName := func(ae *sqlparser.AliasedExpr) bool {
return ctx.SemTable.EqualsExpr(ae.Expr, colName)
- })
- if idx >= 0 {
+ }
+ if slices.IndexFunc(pushedAggr.Columns, findColName) >= 0 {
+ // we already have the column, no need to push it again
continue
}
pushedAggr.addColumnWithoutPushing(ctx, aeWrap(colName), true)
@@ -114,8 +112,10 @@ func pushAggregationThroughSubquery(
src.Outer = pushedAggr
- for _, aggregation := range pushedAggr.Aggregations {
- aggregation.Original.Expr = rewriteColNameToArgument(ctx, aggregation.Original.Expr, aggregation.SubQueryExpression, src.Inner...)
+ for _, aggr := range pushedAggr.Aggregations {
+ // we rewrite columns in the aggregation to use the argument form of the subquery
+ aggr.Original.Expr = rewriteColNameToArgument(ctx, aggr.Original.Expr, aggr.SubQueryExpression, src.Inner...)
+ pushedAggr.Columns[aggr.ColOffset].Expr = rewriteColNameToArgument(ctx, pushedAggr.Columns[aggr.ColOffset].Expr, aggr.SubQueryExpression, src.Inner...)
}
if !rootAggr.Original {
@@ -150,7 +150,7 @@ func pushAggregationThroughRoute(
route *Route,
) (Operator, *ApplyResult) {
// Create a new aggregator to be placed below the route.
- aggrBelowRoute := aggregator.SplitAggregatorBelowRoute(route.Inputs())
+ aggrBelowRoute := aggregator.SplitAggregatorBelowOperators(ctx, route.Inputs())
aggrBelowRoute.Aggregations = nil
pushAggregations(ctx, aggregator, aggrBelowRoute)
@@ -256,7 +256,6 @@ func pushAggregationThroughFilter(
pushedAggr := aggregator.Clone([]Operator{filter.Source}).(*Aggregator)
pushedAggr.Pushed = false
pushedAggr.Original = false
-
withNextColumn:
for _, col := range columnsNeeded {
for _, gb := range pushedAggr.Grouping {
diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing_helper.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing_helper.go
index eb14f83b7df..7b9cd8518fd 100644
--- a/go/vt/vtgate/planbuilder/operators/aggregation_pushing_helper.go
+++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing_helper.go
@@ -135,7 +135,7 @@ func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) er
case opcode.AggregateGroupConcat:
f := aggr.Func.(*sqlparser.GroupConcatExpr)
if f.Distinct || len(f.OrderBy) > 0 || f.Separator != "" {
- panic("fail here")
+ panic(vterrors.VT12001("cannot evaluate group concat with distinct, order by or a separator"))
}
// this needs special handling, currently aborting the push of function
// and later will try pushing the column instead.
diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go
index 256372c172f..0f4b5181385 100644
--- a/go/vt/vtgate/planbuilder/operators/aggregator.go
+++ b/go/vt/vtgate/planbuilder/operators/aggregator.go
@@ -292,6 +292,21 @@ func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) Operator {
return nil
}
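+// setPushColumn replaces the argument(s) of the aggregation function being pushed down; an aggregation without a function (random) accepts exactly one expression.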
+func (aggr Aggr) setPushColumn(exprs sqlparser.Exprs) {
+ if aggr.Func == nil {
+ if len(exprs) > 1 {
+ panic(vterrors.VT13001(fmt.Sprintf("unexpected number of expressions in a random aggregation: %s", sqlparser.String(exprs))))
+ }
+ aggr.Original.Expr = exprs[0]
+ return
+ }
+
+ err := aggr.Func.SetArgs(exprs)
+ if err != nil {
+ panic(err)
+ }
+}
+
func (aggr Aggr) getPushColumn() sqlparser.Expr {
switch aggr.OpCode {
case opcode.AggregateAnyValue:
@@ -311,6 +326,17 @@ func (aggr Aggr) getPushColumn() sqlparser.Expr {
}
}
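+// getPushColumnExprs returns the expression(s) that this aggregation pushes down to its input.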
+func (aggr Aggr) getPushColumnExprs() sqlparser.Exprs {
+ switch aggr.OpCode {
+ case opcode.AggregateAnyValue:
+ return sqlparser.Exprs{aggr.Original.Expr}
+ case opcode.AggregateCountStar:
+ return sqlparser.Exprs{sqlparser.NewIntLiteral("1")}
+ default:
+ return aggr.Func.GetArgs()
+ }
+}
+
func (a *Aggregator) planOffsetsNotPushed(ctx *plancontext.PlanningContext) {
a.Source = newAliasedProjection(a.Source)
// we need to keep things in the column order, so we can't iterate over the aggregations or groupings
@@ -408,14 +434,26 @@ func (a *Aggregator) internalAddColumn(ctx *plancontext.PlanningContext, aliased
return offset
}
-// SplitAggregatorBelowRoute returns the aggregator that will live under the Route.
+// SplitAggregatorBelowOperators returns the aggregator that will live below the given operators.
// This is used when we are splitting the aggregation so one part is done
// at the mysql level and one part at the vtgate level
-func (a *Aggregator) SplitAggregatorBelowRoute(input []Operator) *Aggregator {
+func (a *Aggregator) SplitAggregatorBelowOperators(ctx *plancontext.PlanningContext, input []Operator) *Aggregator {
newOp := a.Clone(input).(*Aggregator)
newOp.Pushed = false
newOp.Original = false
newOp.DT = nil
+
+ // We need to make sure that the columns are cloned so that the original operator is not affected
+ // by the changes we make to the new operator
+ newOp.Columns = slice.Map(a.Columns, func(from *sqlparser.AliasedExpr) *sqlparser.AliasedExpr {
+ return ctx.SemTable.Clone(from).(*sqlparser.AliasedExpr)
+ })
+ for idx, aggr := range newOp.Aggregations {
+ newOp.Aggregations[idx].Original = ctx.SemTable.Clone(aggr.Original).(*sqlparser.AliasedExpr)
+ }
+ for idx, gb := range newOp.Grouping {
+ newOp.Grouping[idx].Inner = ctx.SemTable.Clone(gb.Inner).(sqlparser.Expr)
+ }
return newOp
}
diff --git a/go/vt/vtgate/planbuilder/operators/ast_to_op.go b/go/vt/vtgate/planbuilder/operators/ast_to_op.go
index 8a46109e959..55b29a146c7 100644
--- a/go/vt/vtgate/planbuilder/operators/ast_to_op.go
+++ b/go/vt/vtgate/planbuilder/operators/ast_to_op.go
@@ -224,7 +224,7 @@ func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *s
case sqlparser.NormalJoinType:
return createInnerJoin(ctx, tableExpr, lhs, rhs)
case sqlparser.LeftJoinType, sqlparser.RightJoinType:
- return createOuterJoin(tableExpr, lhs, rhs)
+ return createOuterJoin(ctx, tableExpr, lhs, rhs)
default:
panic(vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString()))
}
diff --git a/go/vt/vtgate/planbuilder/operators/distinct.go b/go/vt/vtgate/planbuilder/operators/distinct.go
index eeddd928f66..e3784dbb904 100644
--- a/go/vt/vtgate/planbuilder/operators/distinct.go
+++ b/go/vt/vtgate/planbuilder/operators/distinct.go
@@ -56,7 +56,6 @@ func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) Operator {
offset := d.Source.AddColumn(ctx, true, false, aeWrap(weightStringFor(e)))
wsCol = &offset
}
-
d.Columns = append(d.Columns, engine.CheckCol{
Col: idx,
WsCol: wsCol,
diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go
index c2432a40da9..0570d61860d 100644
--- a/go/vt/vtgate/planbuilder/operators/filter.go
+++ b/go/vt/vtgate/planbuilder/operators/filter.go
@@ -123,7 +123,7 @@ func (f *Filter) Compact(*plancontext.PlanningContext) (Operator, *ApplyResult)
func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) Operator {
cfg := &evalengine.Config{
- ResolveType: ctx.SemTable.TypeForExpr,
+ ResolveType: ctx.TypeForExpr,
Collation: ctx.SemTable.Collation,
Environment: ctx.VSchema.Environment(),
}
diff --git a/go/vt/vtgate/planbuilder/operators/hash_join.go b/go/vt/vtgate/planbuilder/operators/hash_join.go
index 0ad46bcbc82..f997ed5205d 100644
--- a/go/vt/vtgate/planbuilder/operators/hash_join.go
+++ b/go/vt/vtgate/planbuilder/operators/hash_join.go
@@ -332,7 +332,7 @@ func (hj *HashJoin) addColumn(ctx *plancontext.PlanningContext, in sqlparser.Exp
rewrittenExpr := sqlparser.CopyOnRewrite(in, pre, r.post, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr)
cfg := &evalengine.Config{
- ResolveType: ctx.SemTable.TypeForExpr,
+ ResolveType: ctx.TypeForExpr,
Collation: ctx.SemTable.Collation,
Environment: ctx.VSchema.Environment(),
}
@@ -432,7 +432,7 @@ func (hj *HashJoin) addSingleSidedColumn(
rewrittenExpr := sqlparser.CopyOnRewrite(in, pre, r.post, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr)
cfg := &evalengine.Config{
- ResolveType: ctx.SemTable.TypeForExpr,
+ ResolveType: ctx.TypeForExpr,
Collation: ctx.SemTable.Collation,
Environment: ctx.VSchema.Environment(),
}
diff --git a/go/vt/vtgate/planbuilder/operators/helpers.go b/go/vt/vtgate/planbuilder/operators/helpers.go
index 0049a919e2a..31d9bcfd279 100644
--- a/go/vt/vtgate/planbuilder/operators/helpers.go
+++ b/go/vt/vtgate/planbuilder/operators/helpers.go
@@ -26,13 +26,13 @@ import (
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
+type compactable interface {
+ // Compact is implemented by operators that have easy-to-see optimisations
+ Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult)
+}
+
// compact will optimise the operator tree into a smaller but equivalent version
func compact(ctx *plancontext.PlanningContext, op Operator) Operator {
- type compactable interface {
- // Compact implement this interface for operators that have easy to see optimisations
- Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult)
- }
-
newOp := BottomUp(op, TableID, func(op Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) {
newOp, ok := op.(compactable)
if !ok {
diff --git a/go/vt/vtgate/planbuilder/operators/horizon.go b/go/vt/vtgate/planbuilder/operators/horizon.go
index 34f6dc79217..7388476ab72 100644
--- a/go/vt/vtgate/planbuilder/operators/horizon.go
+++ b/go/vt/vtgate/planbuilder/operators/horizon.go
@@ -99,7 +99,7 @@ func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.
panic(err)
}
- newExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo)
+ newExpr := ctx.RewriteDerivedTableExpression(expr, tableInfo)
if sqlparser.ContainsAggregation(newExpr) {
return newFilter(h, expr)
}
diff --git a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go
index 68880bef90b..64c471ac62c 100644
--- a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go
+++ b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go
@@ -24,6 +24,7 @@ import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
)
func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (Operator, *ApplyResult) {
@@ -74,9 +75,16 @@ func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, unio
}
func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (Operator, *ApplyResult) {
- op := createProjectionFromSelect(ctx, horizon)
qp := horizon.getQP(ctx)
var extracted []string
+ if horizon.IsDerived() {
+ // if we are dealing with a derived table, we need to make sure that the ordering columns
+ // are available outside the derived table
+ for _, order := range horizon.Query.GetOrderBy() {
+ qp.addDerivedColumn(ctx, order.Expr)
+ }
+ }
+ op := createProjectionFromSelect(ctx, horizon)
if qp.HasAggr {
extracted = append(extracted, "Aggregation")
} else {
@@ -98,7 +106,7 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel
}
if len(qp.OrderExprs) > 0 {
- op = expandOrderBy(ctx, op, qp)
+ op = expandOrderBy(ctx, op, qp, horizon.Alias)
extracted = append(extracted, "Ordering")
}
@@ -113,18 +121,36 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel
return op, Rewrote(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", ")))
}
-func expandOrderBy(ctx *plancontext.PlanningContext, op Operator, qp *QueryProjection) Operator {
- proj := newAliasedProjection(op)
+func expandOrderBy(ctx *plancontext.PlanningContext, op Operator, qp *QueryProjection, derived string) Operator {
var newOrder []OrderBy
sqc := &SubQueryBuilder{}
+ proj, ok := op.(*Projection)
+
for _, expr := range qp.OrderExprs {
+ // Attempt to extract any subqueries within the expression
newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr.SimplifiedExpr, TableID(op), false)
if newExpr == nil {
- // no subqueries found, let's move on
+ // If no subqueries are found, retain the original order expression
+ if derived != "" {
+ expr = exposeOrderingColumn(ctx, qp, expr, derived)
+ }
newOrder = append(newOrder, expr)
continue
}
- proj.addSubqueryExpr(aeWrap(newExpr), newExpr, subqs...)
+
+ // If the operator is not a projection, we can only handle subqueries in ORDER BY when everything can be pushed into a single route; record the error so planning fails otherwise.
+ if !ok {
+ ctx.SemTable.NotSingleRouteErr = vterrors.VT12001("subquery with aggregation in order by")
+ return &Ordering{
+ Source: op,
+ Order: qp.OrderExprs,
+ }
+ } else {
+ // Add the new subquery expression to the projection
+ proj.addSubqueryExpr(ctx, aeWrap(newExpr), newExpr, subqs...)
+ }
+
+ // Replace the original order expression with the new expression containing subqueries
newOrder = append(newOrder, OrderBy{
Inner: &sqlparser.Order{
Expr: newExpr,
@@ -132,26 +158,45 @@ func expandOrderBy(ctx *plancontext.PlanningContext, op Operator, qp *QueryProje
},
SimplifiedExpr: newExpr,
})
-
}
- if len(proj.Columns.GetColumns()) > 0 {
- // if we had to project columns for the ordering,
- // we need the projection as source
- op = proj
+ // Update the source of the projection if we have it
+ if proj != nil {
+ proj.Source = sqc.getRootOperator(proj.Source, nil)
}
+ // Return the updated operator with the new order by expressions
return &Ordering{
Source: op,
Order: newOrder,
}
}
+// exposeOrderingColumn will expose the ordering column to the outer query
+func exposeOrderingColumn(ctx *plancontext.PlanningContext, qp *QueryProjection, orderBy OrderBy, derived string) OrderBy {
+ for _, se := range qp.SelectExprs {
+ aliasedExpr, err := se.GetAliasedExpr()
+ if err != nil {
+ panic(vterrors.VT13001("unexpected expression in select"))
+ }
+ if ctx.SemTable.EqualsExprWithDeps(aliasedExpr.Expr, orderBy.SimplifiedExpr) {
+ newExpr := sqlparser.NewColNameWithQualifier(aliasedExpr.ColumnName(), sqlparser.NewTableName(derived))
+ ctx.SemTable.CopySemanticInfo(orderBy.SimplifiedExpr, newExpr)
+ orderBy.SimplifiedExpr = newExpr
+ orderBy.Inner = &sqlparser.Order{Expr: newExpr, Direction: orderBy.Inner.Direction}
+ break
+ }
+ }
+
+ return orderBy
+}
+
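
To make the rewrite concrete: for a query like SELECT * FROM (SELECT a, b FROM t ORDER BY b) AS dt, the `b` in the ORDER BY must be addressable as `dt.b` once the Ordering operator ends up outside the derived table. A minimal standalone sketch of just the qualification step (the planner context and dependency checks are elided; the alias `dt` is hypothetical):

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// qualify the bare ordering column `b` with the derived table alias `dt`,
	// the same AST shape exposeOrderingColumn builds via NewColNameWithQualifier
	qualified := sqlparser.NewColNameWithQualifier("b", sqlparser.NewTableName("dt"))
	fmt.Println(sqlparser.String(qualified)) // dt.b
}
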
func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) Operator {
qp := horizon.getQP(ctx)
var dt *DerivedTable
if horizon.TableId != nil {
+ // a horizon with a TableId represents a derived table, so we build a DerivedTable object for it
dt = &DerivedTable{
TableID: *horizon.TableId,
Alias: horizon.Alias,
@@ -159,13 +204,13 @@ func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horiz
}
}
- if !qp.NeedsAggregation() {
- projX := createProjectionWithoutAggr(ctx, qp, horizon.src())
- projX.DT = dt
- return projX
+ if qp.NeedsAggregation() {
+ return createProjectionWithAggr(ctx, qp, dt, horizon.src())
}
- return createProjectionWithAggr(ctx, qp, dt, horizon.src())
+ projX := createProjectionWithoutAggr(ctx, qp, horizon.src())
+ projX.DT = dt
+ return projX
}
func createProjectionWithAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, dt *DerivedTable, src Operator) Operator {
@@ -181,13 +226,8 @@ func createProjectionWithAggr(ctx *plancontext.PlanningContext, qp *QueryProject
// Go through all aggregations and check for any subquery.
sqc := &SubQueryBuilder{}
- outerID := TableID(src)
for idx, aggr := range aggregations {
- expr := aggr.Original.Expr
- newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr, outerID, false)
- if newExpr != nil {
- aggregations[idx].SubQueryExpression = subqs
- }
+ aggregations[idx] = pullOutValueSubqueries(ctx, aggr, sqc, TableID(src))
}
aggrOp.Source = sqc.getRootOperator(src, nil)
@@ -198,6 +238,25 @@ func createProjectionWithAggr(ctx *plancontext.PlanningContext, qp *QueryProject
return createProjectionForSimpleAggregation(ctx, aggrOp, qp)
}
+func pullOutValueSubqueries(ctx *plancontext.PlanningContext, aggr Aggr, sqc *SubQueryBuilder, outerID semantics.TableSet) Aggr {
+ exprs := aggr.getPushColumnExprs()
+ var newExprs sqlparser.Exprs
+ for _, expr := range exprs {
+ newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr, outerID, false)
+ if newExpr != nil {
+ newExprs = append(newExprs, newExpr)
+ aggr.SubQueryExpression = append(aggr.SubQueryExpression, subqs...)
+ } else {
+ newExprs = append(newExprs, expr)
+ }
+ }
+ if len(aggr.SubQueryExpression) > 0 {
+ aggr.setPushColumn(newExprs)
+ }
+
+ return aggr
+}
+
func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) Operator {
outer:
for colIdx, expr := range qp.SelectExprs {
@@ -280,7 +339,7 @@ func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProj
// there was no subquery in this expression
proj.addUnexploredExpr(org, expr)
} else {
- proj.addSubqueryExpr(org, newExpr, subqs...)
+ proj.addSubqueryExpr(ctx, org, newExpr, subqs...)
}
}
proj.Source = sqc.getRootOperator(src, nil)
diff --git a/go/vt/vtgate/planbuilder/operators/insert.go b/go/vt/vtgate/planbuilder/operators/insert.go
index 7c6e242ae9c..75466500fe6 100644
--- a/go/vt/vtgate/planbuilder/operators/insert.go
+++ b/go/vt/vtgate/planbuilder/operators/insert.go
@@ -506,7 +506,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar
colNum, _ := findOrAddColumn(ins, col)
for rowNum, row := range rows {
innerpv, err := evalengine.Translate(row[colNum], &evalengine.Config{
- ResolveType: ctx.SemTable.TypeForExpr,
+ ResolveType: ctx.TypeForExpr,
Collation: ctx.SemTable.Collation,
Environment: ctx.VSchema.Environment(),
})
@@ -637,7 +637,7 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v
}
var err error
gen.Values, err = evalengine.Translate(autoIncValues, &evalengine.Config{
- ResolveType: ctx.SemTable.TypeForExpr,
+ ResolveType: ctx.TypeForExpr,
Collation: ctx.SemTable.Collation,
Environment: ctx.VSchema.Environment(),
})
diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go
index 787d7fedfcc..8e685beb4cb 100644
--- a/go/vt/vtgate/planbuilder/operators/join.go
+++ b/go/vt/vtgate/planbuilder/operators/join.go
@@ -83,7 +83,7 @@ func (j *Join) Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult
return newOp, Rewrote("merge querygraphs into a single one")
}
-func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator {
+func createOuterJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator {
if tableExpr.Join == sqlparser.RightJoinType {
lhs, rhs = rhs, lhs
}
@@ -93,6 +93,8 @@ func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Oper
}
predicate := tableExpr.Condition.On
sqlparser.RemoveKeyspaceInCol(predicate)
+ // mark the RHS as outer tables so we know which columns are nullable
+ ctx.OuterTables = ctx.OuterTables.Merge(TableID(rhs))
return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate}
}
diff --git a/go/vt/vtgate/planbuilder/operators/projection.go b/go/vt/vtgate/planbuilder/operators/projection.go
index 1eae4e0e06e..38164b71a94 100644
--- a/go/vt/vtgate/planbuilder/operators/projection.go
+++ b/go/vt/vtgate/planbuilder/operators/projection.go
@@ -182,13 +182,24 @@ var _ selectExpressions = (*Projection)(nil)
// createSimpleProjection returns a projection where all columns are offsets.
// used to change the name and order of the columns in the final output
-func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src Operator) *Projection {
+func createSimpleProjection(ctx *plancontext.PlanningContext, selExprs sqlparser.SelectExprs, src Operator) *Projection {
p := newAliasedProjection(src)
- for _, e := range qp.SelectExprs {
- ae, err := e.GetAliasedExpr()
- if err != nil {
- panic(err)
+ for _, e := range selExprs {
+ ae, isAe := e.(*sqlparser.AliasedExpr)
+ if !isAe {
+ panic(vterrors.VT09015())
+ }
+
+ if ae.As.IsEmpty() {
+ // if no alias is given, fall back to the column name: users selecting a
+ // bare column expect to see that name in the output.
+ // for more complex expressions, we assume they'll use column offsets instead of column names
+ col, ok := ae.Expr.(*sqlparser.ColName)
+ if ok {
+ ae.As = col.Name
+ }
}
+
offset := p.Source.AddColumn(ctx, true, false, ae)
expr := newProjExpr(ae)
expr.Info = Offset(offset)
@@ -218,11 +229,14 @@ func (p *Projection) canPush(ctx *plancontext.PlanningContext) bool {
}
func (p *Projection) GetAliasedProjections() (AliasedProjections, error) {
- ap, ok := p.Columns.(AliasedProjections)
- if !ok {
+ switch cols := p.Columns.(type) {
+ case AliasedProjections:
+ return cols, nil
+ case nil:
+ return nil, nil
+ default:
return nil, vterrors.VT09015()
}
- return ap, nil
}
func (p *Projection) isDerived() bool {
@@ -263,8 +277,7 @@ func (p *Projection) addProjExpr(pe ...*ProjExpr) int {
}
offset := len(ap)
- ap = append(ap, pe...)
- p.Columns = ap
+ p.Columns = append(ap, pe...)
return offset
}
@@ -273,7 +286,18 @@ func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Ex
return p.addProjExpr(newProjExprWithInner(ae, e))
}
-func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) {
+func (p *Projection) addSubqueryExpr(ctx *plancontext.PlanningContext, ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) {
+ ap, err := p.GetAliasedProjections()
+ if err != nil {
+ panic(err)
+ }
+ for _, projExpr := range ap {
+ if ctx.SemTable.EqualsExprWithDeps(projExpr.EvalExpr, expr) {
+ // this expression is already part of the projection, so there is nothing to add
+ return
+ }
+ }
+
pe := newProjExprWithInner(ae, expr)
pe.Info = SubQueryExpression(sqs)
@@ -562,7 +586,7 @@ func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) Operator {
// for everything else, we'll turn to the evalengine
eexpr, err := evalengine.Translate(rewritten, &evalengine.Config{
- ResolveType: ctx.SemTable.TypeForExpr,
+ ResolveType: ctx.TypeForExpr,
Collation: ctx.SemTable.Collation,
Environment: ctx.VSchema.Environment(),
})
diff --git a/go/vt/vtgate/planbuilder/operators/query_planning.go b/go/vt/vtgate/planbuilder/operators/query_planning.go
index f412e783f42..9cb47a7f8bd 100644
--- a/go/vt/vtgate/planbuilder/operators/query_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/query_planning.go
@@ -19,6 +19,7 @@ package operators
import (
"fmt"
"io"
+ "strconv"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
@@ -28,6 +29,12 @@ import (
)
func planQuery(ctx *plancontext.PlanningContext, root Operator) Operator {
+ var selExpr sqlparser.SelectExprs
+ if horizon, isHorizon := root.(*Horizon); isHorizon {
+ sel := sqlparser.GetFirstSelect(horizon.Query)
+ selExpr = sqlparser.CloneSelectExprs(sel.SelectExprs)
+ }
+
output := runPhases(ctx, root)
output = planOffsets(ctx, output)
@@ -38,7 +45,7 @@ func planQuery(ctx *plancontext.PlanningContext, root Operator) Operator {
output = compact(ctx, output)
- return addTruncationOrProjectionToReturnOutput(ctx, root, output)
+ return addTruncationOrProjectionToReturnOutput(ctx, selExpr, output)
}
// runPhases is the process of figuring out how to perform the operations in the Horizon
@@ -249,8 +256,169 @@ func tryPushLimit(in *Limit) (Operator, *ApplyResult) {
return tryPushingDownLimitInRoute(in, src)
case *Aggregator:
return in, NoRewrite
+ case *Limit:
+ combinedLimit := mergeLimits(in.AST, src.AST)
+ if combinedLimit == nil {
+ break
+ }
+ // we can remove the other LIMIT
+ in.AST = combinedLimit
+ in.Source = src.Source
+ return in, Rewrote("merged two limits")
+ }
+ return setUpperLimit(in)
+}
+
+func mergeLimits(l1, l2 *sqlparser.Limit) *sqlparser.Limit {
+ // To merge two relational LIMIT operators with LIMIT and OFFSET, we need to combine their
+ // LIMIT and OFFSET values appropriately.
+ // Let's denote the first LIMIT operator's values as LIMIT_1 and OFFSET_1,
+ // and the second LIMIT operator's values as LIMIT_2 and OFFSET_2.
+ // The second LIMIT operator receives the output of the first LIMIT operator, meaning the first LIMIT and
+ // OFFSET are applied first, and then the second LIMIT and OFFSET are applied to the resulting subset.
+ //
+ // The goal is to determine the effective combined LIMIT and OFFSET values when applying these two operators sequentially.
+ //
+ // Combined Offset:
+ // The combined offset (OFFSET_combined) is the sum of the two offsets because you need to skip OFFSET_1 rows first,
+ // and then apply the second offset OFFSET_2 to the result.
+ // OFFSET_combined = OFFSET_1 + OFFSET_2
+
+ // Combined Limit:
+ // The combined limit (LIMIT_combined) needs to account for both limits. The effective limit cannot exceed the rows
+ // left over from the first limit once the second offset is applied, nor the second limit itself:
+ // LIMIT_combined = min(LIMIT_2, LIMIT_1 - OFFSET_2)
+
+ // Note: If LIMIT_1 - OFFSET_2 is negative or zero, it means there are no rows left to limit, so LIMIT_combined should be zero.
+
+ // Example:
+ // First LIMIT operator: LIMIT 10 OFFSET 5 (LIMIT_1 = 10, OFFSET_1 = 5)
+ // Second LIMIT operator: LIMIT 7 OFFSET 3 (LIMIT_2 = 7, OFFSET_2 = 3)
+
+ // Calculations:
+ // Combined OFFSET:
+ // OFFSET_combined = 5 + 3 = 8
+
+ // Combined LIMIT:
+ // remaining rows after OFFSET_2 = 10 - 3 = 7
+ // LIMIT_combined = min(7, 7) = 7
+
+ // So, the combined result would be:
+ // LIMIT 7 OFFSET 8
+
+ // This method ensures that the final combined LIMIT and OFFSET correctly reflect the sequential application of the two original operators.
+ combinedLimit, failed := mergeLimitExpressions(l1.Rowcount, l2.Rowcount, l2.Offset)
+ if failed {
+ return nil
+ }
+ combinedOffset, failed := mergeOffsetExpressions(l1.Offset, l2.Offset)
+ if failed {
+ return nil
+ }
+
+ return &sqlparser.Limit{
+ Offset: combinedOffset,
+ Rowcount: combinedLimit,
+ }
+}
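
A quick sketch of the worked example above run through mergeLimits, as it might appear in a package-internal test (here `first` is the limit applied first, `second` the one applied to its output):

func ExampleMergeLimits() {
	first := &sqlparser.Limit{Offset: sqlparser.NewIntLiteral("5"), Rowcount: sqlparser.NewIntLiteral("10")}
	second := &sqlparser.Limit{Offset: sqlparser.NewIntLiteral("3"), Rowcount: sqlparser.NewIntLiteral("7")}
	combined := mergeLimits(first, second)
	// OFFSET_combined = 5 + 3 = 8, LIMIT_combined = min(7, 10-3) = 7
	fmt.Println(sqlparser.String(combined.Offset), sqlparser.String(combined.Rowcount))
	// Output: 8 7
}
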
+
+func mergeOffsetExpressions(e1, e2 sqlparser.Expr) (expr sqlparser.Expr, failed bool) {
+ switch {
+ case e1 == nil && e2 == nil:
+ return nil, false
+ case e1 == nil:
+ return e2, false
+ case e2 == nil:
+ return e1, false
default:
- return setUpperLimit(in)
+ v1str, ok := e1.(*sqlparser.Literal)
+ if !ok {
+ return nil, true
+ }
+ v2str, ok := e2.(*sqlparser.Literal)
+ if !ok {
+ return nil, true
+ }
+ v1, _ := strconv.Atoi(v1str.Val)
+ v2, _ := strconv.Atoi(v2str.Val)
+ return sqlparser.NewIntLiteral(strconv.Itoa(v1 + v2)), false
+ }
+}
+
+// mergeLimitExpressions merges two LIMIT expressions with an OFFSET expression.
+// l1: First LIMIT expression.
+// l2: Second LIMIT expression.
+// off2: Second OFFSET expression.
+// Returns the merged LIMIT expression and a boolean indicating if the merge failed.
+func mergeLimitExpressions(l1, l2, off2 sqlparser.Expr) (expr sqlparser.Expr, failed bool) {
+ switch {
+ // If both limits are nil, there's nothing to merge, return nil without failure.
+ case l1 == nil && l2 == nil:
+ return nil, false
+
+ // If the first limit is nil, the second limit determines the final limit.
+ case l1 == nil:
+ return l2, false
+
+ // If the second limit is nil, calculate the remaining limit after applying the offset to the first limit.
+ case l2 == nil:
+ if off2 == nil {
+ // No offset, so the first limit is used directly.
+ return l1, false
+ }
+ off2, ok := off2.(*sqlparser.Literal)
+ if !ok {
+ // If the offset is not a literal, fail the merge.
+ return nil, true
+ }
+ lim1str, ok := l1.(*sqlparser.Literal)
+ if !ok {
+ // If the first limit is not a literal, we cannot compute the remainder, so fail the merge.
+ return nil, true
+ }
+ // Calculate the remaining limit after the offset.
+ off2int, _ := strconv.Atoi(off2.Val)
+ l1int, _ := strconv.Atoi(lim1str.Val)
+ lim := l1int - off2int
+ if lim < 0 {
+ lim = 0
+ }
+ return sqlparser.NewIntLiteral(strconv.Itoa(lim)), false
+
+ default:
+ v1str, ok1 := l1.(*sqlparser.Literal)
+ if ok1 && v1str.Val == "1" {
+ // If the first limit is "1", it dominates, so return it.
+ return l1, false
+ }
+ v2str, ok2 := l2.(*sqlparser.Literal)
+ if ok2 && v2str.Val == "1" {
+ // If the second limit is "1", it dominates, so return it.
+ return l2, false
+ }
+ if !ok1 || !ok2 {
+ // If either limit is not a literal, fail the merge.
+ return nil, true
+ }
+
+ var off2int int
+ if off2 != nil {
+ off2, ok := off2.(*sqlparser.Literal)
+ if !ok {
+ // If the offset is not a literal, fail the merge.
+ return nil, true
+ }
+ off2int, _ = strconv.Atoi(off2.Val)
+ }
+
+ v1, _ := strconv.Atoi(v1str.Val)
+ v2, _ := strconv.Atoi(v2str.Val)
+ lim := min(v2, v1-off2int)
+ if lim < 0 {
+ // If the combined limit is negative, set it to zero.
+ lim = 0
+ }
+ return sqlparser.NewIntLiteral(strconv.Itoa(lim)), false
}
}
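
One corner of the logic above worth calling out: the merge treats a literal LIMIT 1 on either side as dominant and returns it directly, even when the other limit is an expression it cannot evaluate at plan time. A package-internal sketch (the argument name is made up):

func exampleLimitOneDominates() {
	// a literal "1" short-circuits the merge before the non-literal
	// other_limit argument would otherwise force a failure
	lim, failed := mergeLimitExpressions(sqlparser.NewIntLiteral("1"), sqlparser.NewArgument("other_limit"), nil)
	fmt.Println(sqlparser.String(lim), failed) // 1 false
}
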
@@ -276,6 +444,11 @@ func setUpperLimit(in *Limit) (Operator, *ApplyResult) {
case *Join, *ApplyJoin, *SubQueryContainer, *SubQuery:
// we can't push limits down on either side
return SkipChildren
+ case *Aggregator:
+ if len(op.Grouping) > 0 {
+ // we can't push limits down if we have a group by
+ return SkipChildren
+ }
case *Route:
newSrc := &Limit{
Source: op.Source,
@@ -285,9 +458,8 @@ func setUpperLimit(in *Limit) (Operator, *ApplyResult) {
op.Source = newSrc
result = result.Merge(Rewrote("push limit under route"))
return SkipChildren
- default:
- return VisitChildren
}
+ return VisitChildren
}
TopDown(in.Source, TableID, visitor, shouldVisit)
@@ -303,23 +475,14 @@ func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (Operator,
return Swap(in, src, "push ordering under filter")
case *ApplyJoin:
if canPushLeft(ctx, src, in.Order) {
- // ApplyJoin is stable in regard to the columns coming from the LHS,
- // so if all the ordering columns come from the LHS, we can push down the Ordering there
- src.LHS, in.Source = in, src.LHS
- return src, Rewrote("push down ordering on the LHS of a join")
+ return pushOrderLeftOfJoin(src, in)
}
case *Ordering:
// we'll just remove the order underneath. The top order replaces whatever was incoming
in.Source = src.Source
return in, Rewrote("remove double ordering")
case *Projection:
- // we can move ordering under a projection if it's not introducing a column we're sorting by
- for _, by := range in.Order {
- if !mustFetchFromInput(by.SimplifiedExpr) {
- return in, NoRewrite
- }
- }
- return Swap(in, src, "push ordering under projection")
+ return pushOrderingUnderProjection(ctx, in, src)
case *Aggregator:
if !src.QP.AlignGroupByAndOrderBy(ctx) && !overlaps(ctx, in.Order, src.Grouping) {
return in, NoRewrite
@@ -327,29 +490,65 @@ func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (Operator,
return pushOrderingUnderAggr(ctx, in, src)
case *SubQueryContainer:
- outerTableID := TableID(src.Outer)
- for _, order := range in.Order {
- deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr)
- if !deps.IsSolvedBy(outerTableID) {
- return in, NoRewrite
- }
- }
- src.Outer, in.Source = in, src.Outer
- return src, Rewrote("push ordering into outer side of subquery")
+ return pushOrderingToOuterOfSubqueryContainer(ctx, in, src)
case *SubQuery:
- outerTableID := TableID(src.Outer)
- for _, order := range in.Order {
- deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr)
- if !deps.IsSolvedBy(outerTableID) {
- return in, NoRewrite
- }
- }
- src.Outer, in.Source = in, src.Outer
- return src, Rewrote("push ordering into outer side of subquery")
+ return pushOrderingToOuterOfSubquery(ctx, in, src)
}
return in, NoRewrite
}
+func pushOrderingToOuterOfSubquery(ctx *plancontext.PlanningContext, in *Ordering, sq *SubQuery) (Operator, *ApplyResult) {
+ outerTableID := TableID(sq.Outer)
+ for idx, order := range in.Order {
+ deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr)
+ if !deps.IsSolvedBy(outerTableID) {
+ return in, NoRewrite
+ }
+ in.Order[idx].SimplifiedExpr = sq.rewriteColNameToArgument(order.SimplifiedExpr)
+ in.Order[idx].Inner.Expr = sq.rewriteColNameToArgument(order.Inner.Expr)
+ }
+ sq.Outer, in.Source = in, sq.Outer
+ return sq, Rewrote("push ordering into outer side of subquery")
+}
+
+func pushOrderingToOuterOfSubqueryContainer(ctx *plancontext.PlanningContext, in *Ordering, subq *SubQueryContainer) (Operator, *ApplyResult) {
+ outerTableID := TableID(subq.Outer)
+ for _, order := range in.Order {
+ deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr)
+ if !deps.IsSolvedBy(outerTableID) {
+ return in, NoRewrite
+ }
+ }
+ subq.Outer, in.Source = in, subq.Outer
+ return subq, Rewrote("push ordering into outer side of subquery")
+}
+
+func pushOrderingUnderProjection(ctx *plancontext.PlanningContext, in *Ordering, proj *Projection) (Operator, *ApplyResult) {
+ // we can move ordering under a projection if it's not introducing a column we're sorting by
+ for _, by := range in.Order {
+ if !mustFetchFromInput(by.SimplifiedExpr) {
+ return in, NoRewrite
+ }
+ }
+ ap, ok := proj.Columns.(AliasedProjections)
+ if !ok {
+ return in, NoRewrite
+ }
+ for _, projExpr := range ap {
+ if projExpr.Info != nil {
+ return in, NoRewrite
+ }
+ }
+ return Swap(in, proj, "push ordering under projection")
+}
+
+func pushOrderLeftOfJoin(src *ApplyJoin, in *Ordering) (Operator, *ApplyResult) {
+ // ApplyJoin is stable in regard to the columns coming from the LHS,
+ // so if all the ordering columns come from the LHS, we can push down the Ordering there
+ src.LHS, in.Source = in, src.LHS
+ return src, Rewrote("push down ordering on the LHS of a join")
+}
+
func overlaps(ctx *plancontext.PlanningContext, order []OrderBy, grouping []GroupBy) bool {
ordering:
for _, orderBy := range order {
@@ -605,25 +804,56 @@ func tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (Operator, *Apply
}
// addTruncationOrProjectionToReturnOutput uses the original Horizon to make sure that the output columns line up with what the user asked for
-func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon Operator, output Operator) Operator {
- horizon, ok := oldHorizon.(*Horizon)
- if !ok {
+func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, selExprs sqlparser.SelectExprs, output Operator) Operator {
+ if len(selExprs) == 0 {
return output
}
cols := output.GetSelectExprs(ctx)
- sel := sqlparser.GetFirstSelect(horizon.Query)
- if len(sel.SelectExprs) == len(cols) {
+ sizeCorrect := len(selExprs) == len(cols) || tryTruncateColumnsAt(output, len(selExprs))
+ if sizeCorrect && colNamesAlign(selExprs, cols) {
return output
}
- if tryTruncateColumnsAt(output, len(sel.SelectExprs)) {
- return output
+ return createSimpleProjection(ctx, selExprs, output)
+}
+
+func colNamesAlign(expected, actual sqlparser.SelectExprs) bool {
+ if len(expected) > len(actual) {
+ // if we expect more columns than we have, we can't align
+ return false
}
- qp := horizon.getQP(ctx)
- proj := createSimpleProjection(ctx, qp, output)
- return proj
+ for i, seE := range expected {
+ switch se := seE.(type) {
+ case *sqlparser.AliasedExpr:
+ if !areColumnNamesAligned(se, actual[i]) {
+ return false
+ }
+ case *sqlparser.StarExpr:
+ actualStar, isStar := actual[i].(*sqlparser.StarExpr)
+ if !isStar {
+ panic(vterrors.VT13001(fmt.Sprintf("star expression is expected here, found: %T", actual[i])))
+ }
+ if !sqlparser.Equals.RefOfStarExpr(se, actualStar) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func areColumnNamesAligned(expectation *sqlparser.AliasedExpr, actual sqlparser.SelectExpr) bool {
+ _, isCol := expectation.Expr.(*sqlparser.ColName)
+ if expectation.As.IsEmpty() && !isCol {
+ // if the user didn't specify a name, we don't care
+ return true
+ }
+ actualAE, isAe := actual.(*sqlparser.AliasedExpr)
+ if !isAe {
+ panic(vterrors.VT13001("used star expression when user did not"))
+ }
+ return expectation.ColumnName() == actualAE.ColumnName()
}
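
To see what the alignment check catches, consider a user query selecting `user_id AS id` while the plan's output column is the plain `user_id`: the names differ, so a simple projection is added to restore the expected header. A minimal standalone sketch of the comparison (hypothetical column names, outside the planner):

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	expected := &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("user_id"), As: sqlparser.NewIdentifierCI("id")}
	actual := &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("user_id")}
	// ColumnName() prefers the alias when one is set, else the column name
	fmt.Println(expected.ColumnName(), actual.ColumnName()) // id user_id → not aligned
}
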
func stopAtRoute(operator Operator) VisitRule {
diff --git a/go/vt/vtgate/planbuilder/operators/query_planning_test.go b/go/vt/vtgate/planbuilder/operators/query_planning_test.go
new file mode 100644
index 00000000000..f0405c5a566
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/query_planning_test.go
@@ -0,0 +1,185 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+)
+
+func TestMergeOffsetExpressions(t *testing.T) {
+ tests := []struct {
+ name string
+ offset1 sqlparser.Expr
+ offset2 sqlparser.Expr
+ expectedExpr sqlparser.Expr
+ expectedFailed bool
+ }{
+ {
+ name: "both offsets are integers",
+ offset1: sqlparser.NewIntLiteral("5"),
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: sqlparser.NewIntLiteral("8"),
+ expectedFailed: false,
+ },
+ {
+ name: "first offset is nil",
+ offset1: nil,
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: sqlparser.NewIntLiteral("3"),
+ expectedFailed: false,
+ },
+ {
+ name: "second offset is nil",
+ offset1: sqlparser.NewIntLiteral("5"),
+ offset2: nil,
+ expectedExpr: sqlparser.NewIntLiteral("5"),
+ expectedFailed: false,
+ },
+ {
+ name: "both offsets are nil",
+ offset1: nil,
+ offset2: nil,
+ expectedExpr: nil,
+ expectedFailed: false,
+ },
+ {
+ name: "first offset is argument",
+ offset1: sqlparser.NewArgument("offset1"),
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: nil,
+ expectedFailed: true,
+ },
+ {
+ name: "second offset is argument",
+ offset1: sqlparser.NewIntLiteral("5"),
+ offset2: sqlparser.NewArgument("offset2"),
+ expectedExpr: nil,
+ expectedFailed: true,
+ },
+ {
+ name: "both offsets are arguments",
+ offset1: sqlparser.NewArgument("offset1"),
+ offset2: sqlparser.NewArgument("offset2"),
+ expectedExpr: nil,
+ expectedFailed: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ expr, failed := mergeOffsetExpressions(tt.offset1, tt.offset2)
+ assert.Equal(t, tt.expectedExpr, expr)
+ assert.Equal(t, tt.expectedFailed, failed, "failed")
+ })
+ }
+}
+
+func TestMergeLimitExpressions(t *testing.T) {
+ tests := []struct {
+ name string
+ limit1 sqlparser.Expr
+ limit2 sqlparser.Expr
+ offset2 sqlparser.Expr
+ expectedExpr sqlparser.Expr
+ expectedFailed bool
+ }{
+ {
+ name: "valid limits and offset",
+ limit1: sqlparser.NewIntLiteral("10"),
+ limit2: sqlparser.NewIntLiteral("7"),
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: sqlparser.NewIntLiteral("7"),
+ expectedFailed: false,
+ },
+ {
+ name: "remaining rows after offset2 is zero",
+ limit1: sqlparser.NewIntLiteral("3"),
+ limit2: sqlparser.NewIntLiteral("7"),
+ offset2: sqlparser.NewIntLiteral("5"),
+ expectedExpr: sqlparser.NewIntLiteral("0"),
+ expectedFailed: false,
+ },
+ {
+ name: "first limit is nil",
+ limit1: nil,
+ limit2: sqlparser.NewIntLiteral("7"),
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: sqlparser.NewIntLiteral("7"),
+ expectedFailed: false,
+ },
+ {
+ name: "second limit is nil",
+ limit1: sqlparser.NewIntLiteral("10"),
+ limit2: nil,
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: sqlparser.NewIntLiteral("7"),
+ expectedFailed: false,
+ },
+ {
+ name: "offset2 is nil",
+ limit1: sqlparser.NewIntLiteral("10"),
+ limit2: sqlparser.NewIntLiteral("7"),
+ offset2: nil,
+ expectedExpr: sqlparser.NewIntLiteral("7"),
+ expectedFailed: false,
+ },
+ {
+ name: "first limit is argument",
+ limit1: sqlparser.NewArgument("limit1"),
+ limit2: sqlparser.NewIntLiteral("7"),
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: nil,
+ expectedFailed: true,
+ },
+ {
+ name: "second limit is argument",
+ limit1: sqlparser.NewIntLiteral("10"),
+ limit2: sqlparser.NewArgument("limit2"),
+ offset2: sqlparser.NewIntLiteral("3"),
+ expectedExpr: nil,
+ expectedFailed: true,
+ },
+ {
+ name: "offset2 is argument",
+ limit1: sqlparser.NewIntLiteral("10"),
+ limit2: sqlparser.NewIntLiteral("7"),
+ offset2: sqlparser.NewArgument("offset2"),
+ expectedExpr: nil,
+ expectedFailed: true,
+ },
+ {
+ name: "all are arguments",
+ limit1: sqlparser.NewArgument("limit1"),
+ limit2: sqlparser.NewArgument("limit2"),
+ offset2: sqlparser.NewArgument("offset2"),
+ expectedExpr: nil,
+ expectedFailed: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ expr, failed := mergeLimitExpressions(tt.limit1, tt.limit2, tt.offset2)
+ assert.Equal(t, tt.expectedExpr, expr)
+ assert.Equal(t, tt.expectedFailed, failed, "failed")
+ })
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection.go b/go/vt/vtgate/planbuilder/operators/queryprojection.go
index 14bea4f4674..e2d9adb315d 100644
--- a/go/vt/vtgate/planbuilder/operators/queryprojection.go
+++ b/go/vt/vtgate/planbuilder/operators/queryprojection.go
@@ -69,7 +69,7 @@ type (
// Aggr encodes all information needed for aggregation functions
Aggr struct {
Original *sqlparser.AliasedExpr
- Func sqlparser.AggrFunc
+ Func sqlparser.AggrFunc // if Func is missing, this is an AggregateAnyValue
OpCode opcode.AggregateOpcode
// OriginalOpCode will contain opcode.AggregateUnassigned unless we are changing opcode while pushing them down
@@ -107,7 +107,7 @@ func (aggr Aggr) GetTypeCollation(ctx *plancontext.PlanningContext) evalengine.T
}
switch aggr.OpCode {
case opcode.AggregateMin, opcode.AggregateMax, opcode.AggregateSumDistinct, opcode.AggregateCountDistinct:
- typ, _ := ctx.SemTable.TypeForExpr(aggr.Func.GetArg())
+ typ, _ := ctx.TypeForExpr(aggr.Func.GetArg())
return typ
}
@@ -314,8 +314,7 @@ func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy
canPushSorting := true
es := &expressionSet{}
for _, order := range orderBy {
- if sqlparser.IsNull(order.Expr) {
- // ORDER BY null can safely be ignored
+ if canIgnoreOrdering(ctx, order.Expr) {
continue
}
if !es.add(ctx, order.Expr) {
@@ -329,6 +328,18 @@ func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy
}
}
+// canIgnoreOrdering returns true if the ordering expression has no effect on the result.
+func canIgnoreOrdering(ctx *plancontext.PlanningContext, expr sqlparser.Expr) bool {
+ switch expr.(type) {
+ case *sqlparser.NullVal, *sqlparser.Literal, *sqlparser.Argument:
+ return true
+ case *sqlparser.Subquery:
+ return ctx.SemTable.RecursiveDeps(expr).IsEmpty()
+ default:
+ return false
+ }
+}
+
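
For instance, ORDER BY NULL, ORDER BY 'x', and ORDER BY :a all leave the result unchanged, as does ordering by a subquery that references no outer columns. A small package-internal sketch of the simple cases (the subquery branch needs the planning context for its dependency check, so a nil context suffices here):

func exampleIgnorableOrderings() {
	for _, expr := range []sqlparser.Expr{
		&sqlparser.NullVal{},         // ORDER BY NULL
		sqlparser.NewStrLiteral("x"), // ORDER BY 'x'
		sqlparser.NewArgument("a"),   // ORDER BY :a
	} {
		fmt.Println(canIgnoreOrdering(nil, expr)) // true for all three
	}
}
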
func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) {
if qp.Distinct && !qp.HasAggr {
distinct := qp.useGroupingOverDistinct(ctx)
@@ -732,6 +743,24 @@ func (qp *QueryProjection) useGroupingOverDistinct(ctx *plancontext.PlanningCont
return true
}
+// addDerivedColumn adds a column to the QueryProjection if it is not already present.
+// It will use a column name that is available outside the derived table
+func (qp *QueryProjection) addDerivedColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) {
+ for _, selectExpr := range qp.SelectExprs {
+ getExpr, err := selectExpr.GetExpr()
+ if err != nil {
+ continue
+ }
+ if ctx.SemTable.EqualsExprWithDeps(getExpr, expr) {
+ return
+ }
+ }
+ qp.SelectExprs = append(qp.SelectExprs, SelectExpr{
+ Col: aeWrap(expr),
+ Aggr: containsAggr(expr),
+ })
+}
+
func checkForInvalidGroupingExpressions(expr sqlparser.Expr) {
_ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) {
if _, isAggregate := node.(sqlparser.AggrFunc); isAggregate {
diff --git a/go/vt/vtgate/planbuilder/operators/rewriters.go b/go/vt/vtgate/planbuilder/operators/rewriters.go
index 6a329860b4b..7ec8379dfab 100644
--- a/go/vt/vtgate/planbuilder/operators/rewriters.go
+++ b/go/vt/vtgate/planbuilder/operators/rewriters.go
@@ -218,6 +218,9 @@ func bottomUp(
childID = childID.Merge(resolveID(oldInputs[0]))
}
in, changed := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false)
+ if DebugOperatorTree && changed.Changed() {
+ fmt.Println(ToTree(in))
+ }
anythingChanged = anythingChanged.Merge(changed)
newInputs[i] = in
}
diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go
index ef6117b1d8e..6818311c0dd 100644
--- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go
+++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go
@@ -540,7 +540,7 @@ func (tr *ShardedRouting) planCompositeInOpArg(
Key: right.String(),
Index: idx,
}
- if typ, found := ctx.SemTable.TypeForExpr(col); found {
+ if typ, found := ctx.TypeForExpr(col); found {
value.Type = typ.Type()
value.Collation = typ.Collation()
}
@@ -654,7 +654,7 @@ func makeEvalEngineExpr(ctx *plancontext.PlanningContext, n sqlparser.Expr) eval
for _, expr := range ctx.SemTable.GetExprAndEqualities(n) {
ee, _ := evalengine.Translate(expr, &evalengine.Config{
Collation: ctx.SemTable.Collation,
- ResolveType: ctx.SemTable.TypeForExpr,
+ ResolveType: ctx.TypeForExpr,
Environment: ctx.VSchema.Environment(),
})
if ee != nil {
diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go
index 537737363c8..3e39e5355ef 100644
--- a/go/vt/vtgate/planbuilder/operators/subquery.go
+++ b/go/vt/vtgate/planbuilder/operators/subquery.go
@@ -53,7 +53,8 @@ type SubQuery struct {
// We use this information to fail the planning if we are unable to merge the subquery with a route.
correlated bool
- IsProjection bool
+ // IsArgument is set to true if the subquery is used as a value (an
+ // argument passed to the outer query) rather than as a filtering predicate
+ IsArgument bool
}
func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) Operator {
@@ -156,8 +157,8 @@ func (sq *SubQuery) SetInputs(inputs []Operator) {
func (sq *SubQuery) ShortDescription() string {
var typ string
- if sq.IsProjection {
- typ = "PROJ"
+ if sq.IsArgument {
+ typ = "ARGUMENT"
} else {
typ = "FILTER"
}
@@ -175,8 +176,11 @@ func (sq *SubQuery) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparse
return sq
}
-func (sq *SubQuery) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, exprs *sqlparser.AliasedExpr) int {
- return sq.Outer.AddColumn(ctx, reuseExisting, addToGroupBy, exprs)
+func (sq *SubQuery) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, ae *sqlparser.AliasedExpr) int {
+ ae = sqlparser.CloneRefOfAliasedExpr(ae)
+ // we need to rewrite any column that refers to this subquery's result into the subquery's argument
+ ae.Expr = rewriteColNameToArgument(ctx, ae.Expr, []*SubQuery{sq}, sq)
+ return sq.Outer.AddColumn(ctx, reuseExisting, addToGroupBy, ae)
}
func (sq *SubQuery) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int {
@@ -206,7 +210,7 @@ func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer Operator) Ope
if sq.correlated && sq.FilterType != opcode.PulloutExists {
panic(correlatedSubqueryErr)
}
- if sq.IsProjection {
+ if sq.IsArgument {
if len(sq.GetMergePredicates()) > 0 {
// this means that we have a correlated subquery on our hands
panic(correlatedSubqueryErr)
@@ -220,11 +224,20 @@ func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer Operator) Ope
var correlatedSubqueryErr = vterrors.VT12001("correlated subquery is only supported for EXISTS")
var subqueryNotAtTopErr = vterrors.VT12001("unmergable subquery can not be inside complex expression")
+func (sq *SubQuery) addLimit() {
+ // an EXISTS check only needs a single row to make its decision, so we can add a LIMIT 1 to the subquery
+ sq.Subquery = &Limit{
+ Source: sq.Subquery,
+ AST: &sqlparser.Limit{Rowcount: sqlparser.NewIntLiteral("1")},
+ }
+}
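
At the SQL level the rewrite amounts to turning the EXISTS probe into one that fetches at most a single row. A sketch using the parser on a hypothetical query (not from the test suite):

func exampleExistsLimit() {
	stmt, err := sqlparser.NewTestParser().Parse("select 1 from orders where uid = :uid")
	if err != nil {
		panic(err)
	}
	sel := stmt.(*sqlparser.Select)
	// the same AST node addLimit attaches to the subquery operator
	sel.Limit = &sqlparser.Limit{Rowcount: sqlparser.NewIntLiteral("1")}
	fmt.Println(sqlparser.String(sel)) // select 1 from orders where uid = :uid limit 1
}
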
+
func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer Operator) Operator {
if len(sq.Predicates) > 0 {
if sq.FilterType != opcode.PulloutExists {
panic(correlatedSubqueryErr)
}
+ sq.addLimit()
return outer
}
@@ -252,8 +265,10 @@ func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer Operato
var predicates []sqlparser.Expr
switch sq.FilterType {
case opcode.PulloutExists:
+ sq.addLimit()
predicates = append(predicates, sqlparser.NewArgument(hasValuesArg()))
case opcode.PulloutNotExists:
+ sq.addLimit()
sq.FilterType = opcode.PulloutExists // it's the same pullout as EXISTS, just with a NOT in front of the predicate
predicates = append(predicates, sqlparser.NewNotExpr(sqlparser.NewArgument(hasValuesArg())))
case opcode.PulloutIn:
@@ -289,3 +304,18 @@ func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) sqlparser.Expr) {
sq.Original = f(sq.Original)
sq.originalSubquery = f(sq.originalSubquery).(*sqlparser.Subquery)
}
+
+func (sq *SubQuery) rewriteColNameToArgument(expr sqlparser.Expr) sqlparser.Expr {
+ pre := func(cursor *sqlparser.Cursor) bool {
+ colName, ok := cursor.Node().(*sqlparser.ColName)
+ if !ok || colName.Qualifier.NonEmpty() || !colName.Name.EqualString(sq.ArgName) {
+ // we only want to rewrite the column name to an argument if it's the right column
+ return true
+ }
+
+ cursor.Replace(sqlparser.NewArgument(sq.ArgName))
+ return true
+ }
+
+ return sqlparser.Rewrite(expr, pre, nil).(sqlparser.Expr)
+}
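
The same rewrite in isolation: an unqualified column whose name equals the subquery's argument name becomes an argument reference. A package-internal sketch, with "sq1" standing in for sq.ArgName:

func exampleColNameToArgument() {
	expr := sqlparser.NewColName("sq1") // sq1 stands in for sq.ArgName
	out := sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
		col, ok := cursor.Node().(*sqlparser.ColName)
		if ok && col.Name.EqualString("sq1") {
			cursor.Replace(sqlparser.NewArgument("sq1"))
		}
		return true
	}, nil).(sqlparser.Expr)
	fmt.Println(sqlparser.String(out)) // :sq1
}
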
diff --git a/go/vt/vtgate/planbuilder/operators/subquery_builder.go b/go/vt/vtgate/planbuilder/operators/subquery_builder.go
index 4caf3530075..42bbfdb7c99 100644
--- a/go/vt/vtgate/planbuilder/operators/subquery_builder.go
+++ b/go/vt/vtgate/planbuilder/operators/subquery_builder.go
@@ -159,7 +159,7 @@ func createSubquery(
parent sqlparser.Expr,
argName string,
filterType opcode.PulloutOpcode,
- isProjection bool,
+ isArg bool,
) *SubQuery {
topLevel := ctx.SemTable.EqualsExpr(original, parent)
original = cloneASTAndSemState(ctx, original)
@@ -181,7 +181,7 @@ func createSubquery(
Original: original,
ArgName: argName,
originalSubquery: originalSq,
- IsProjection: isProjection,
+ IsArgument: isArg,
TopLevel: topLevel,
JoinColumns: joinCols,
correlated: correlated,
diff --git a/go/vt/vtgate/planbuilder/operators/subquery_container.go b/go/vt/vtgate/planbuilder/operators/subquery_container.go
index e4feeab49d8..41b645ac7b4 100644
--- a/go/vt/vtgate/planbuilder/operators/subquery_container.go
+++ b/go/vt/vtgate/planbuilder/operators/subquery_container.go
@@ -43,7 +43,7 @@ func (sqc *SubQueryContainer) Clone(inputs []Operator) Operator {
if !ok {
panic("got bad input")
}
- result.Inner = append(result.Inner, inner)
+ result.addInner(inner)
}
return result
}
@@ -90,3 +90,13 @@ func (sqc *SubQueryContainer) GetColumns(ctx *plancontext.PlanningContext) []*sq
func (sqc *SubQueryContainer) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs {
return sqc.Outer.GetSelectExprs(ctx)
}
+
+func (sqc *SubQueryContainer) addInner(inner *SubQuery) {
+ for _, sq := range sqc.Inner {
+ if sq.ArgName == inner.ArgName {
+ // we already have this subquery
+ return
+ }
+ }
+ sqc.Inner = append(sqc.Inner, inner)
+}
diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
index 960cde99acc..a85829bab6d 100644
--- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
@@ -100,8 +100,11 @@ func settleSubqueries(ctx *plancontext.PlanningContext, op Operator) Operator {
newExpr, rewritten := rewriteMergedSubqueryExpr(ctx, aggr.SubQueryExpression, aggr.Original.Expr)
if rewritten {
aggr.Original.Expr = newExpr
+ op.Columns[aggr.ColOffset].Expr = newExpr
}
}
+ case *Ordering:
+ op.settleOrderingExpressions(ctx)
}
return op, NoRewrite
}
@@ -109,6 +112,29 @@ func settleSubqueries(ctx *plancontext.PlanningContext, op Operator) Operator {
return BottomUp(op, TableID, visit, nil)
}
+func (o *Ordering) settleOrderingExpressions(ctx *plancontext.PlanningContext) {
+ for idx, order := range o.Order {
+ for _, sq := range ctx.MergedSubqueries {
+ arg := ctx.GetReservedArgumentFor(sq)
+ expr := sqlparser.Rewrite(order.SimplifiedExpr, nil, func(cursor *sqlparser.Cursor) bool {
+ switch expr := cursor.Node().(type) {
+ case *sqlparser.ColName:
+ if expr.Name.String() == arg {
+ cursor.Replace(sq)
+ }
+ case *sqlparser.Argument:
+ if expr.Name == arg {
+ cursor.Replace(sq)
+ }
+ }
+
+ return true
+ })
+ o.Order[idx].SimplifiedExpr = expr.(sqlparser.Expr)
+ }
+ }
+}
+
func mergeSubqueryExpr(ctx *plancontext.PlanningContext, pe *ProjExpr) {
se, ok := pe.Info.(SubQueryExpression)
if !ok {
@@ -319,7 +345,7 @@ func addSubQuery(in Operator, inner *SubQuery) Operator {
}
}
- sql.Inner = append(sql.Inner, inner)
+ sql.addInner(inner)
return sql
}
@@ -467,6 +493,7 @@ func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQu
return outer, NoRewrite
}
exprs := subQuery.GetMergePredicates()
+ sqlparser.RemoveKeyspace(subQuery.Original)
merger := &subqueryRouteMerger{
outer: outer,
original: subQuery.Original,
@@ -476,7 +503,7 @@ func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQu
if op == nil {
return outer, NoRewrite
}
- if !subQuery.IsProjection {
+ if !subQuery.IsArgument {
op.Source = newFilter(outer.Source, subQuery.Original)
}
ctx.MergedSubqueries = append(ctx.MergedSubqueries, subQuery.originalSubquery)
@@ -581,7 +608,7 @@ func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, out
var src Operator
if isSharded {
src = s.outer.Source
- if !s.subq.IsProjection {
+ if !s.subq.IsArgument {
src = newFilter(s.outer.Source, s.original)
}
} else {
@@ -642,7 +669,7 @@ func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningCont
panic(err)
}
- if s.subq.IsProjection {
+ if s.subq.IsArgument {
ctx.SemTable.CopySemanticInfo(s.subq.originalSubquery.Select, subqStmt)
s.subq.originalSubquery.Select = subqStmt
} else {
diff --git a/go/vt/vtgate/planbuilder/operators/union_merging.go b/go/vt/vtgate/planbuilder/operators/union_merging.go
index 1fb7d4fb454..c2fd79cd026 100644
--- a/go/vt/vtgate/planbuilder/operators/union_merging.go
+++ b/go/vt/vtgate/planbuilder/operators/union_merging.go
@@ -202,8 +202,8 @@ func createMergedUnion(
continue
}
deps = deps.Merge(ctx.SemTable.RecursiveDeps(rae.Expr))
- rt, foundR := ctx.SemTable.TypeForExpr(rae.Expr)
- lt, foundL := ctx.SemTable.TypeForExpr(lae.Expr)
+ rt, foundR := ctx.TypeForExpr(rae.Expr)
+ lt, foundL := ctx.TypeForExpr(lae.Expr)
if foundR && foundL {
types := []sqltypes.Type{rt.Type(), lt.Type()}
t := evalengine.AggregateTypes(types)
diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go
index 8d339eb52ee..62dd567b0d8 100644
--- a/go/vt/vtgate/planbuilder/plan_test.go
+++ b/go/vt/vtgate/planbuilder/plan_test.go
@@ -30,6 +30,7 @@ import (
"github.com/nsf/jsondiff"
"github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/utils"
@@ -47,23 +48,40 @@ import (
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
-func makeTestOutput(t *testing.T) string {
- testOutputTempDir := utils.MakeTestOutput(t, "testdata", "plan_test")
+var expectedDir = "testdata/expected"
- return testOutputTempDir
+func getTestExpectationDir() string {
+ return filepath.Clean(expectedDir)
}
-func TestPlan(t *testing.T) {
- defer utils.EnsureNoLeaks(t)
+type planTestSuite struct {
+ suite.Suite
+ outputDir string
+}
+
+func (s *planTestSuite) SetupSuite() {
+ dir := getTestExpectationDir()
+ err := os.RemoveAll(dir)
+ require.NoError(s.T(), err)
+ err = os.Mkdir(dir, 0755)
+ require.NoError(s.T(), err)
+ s.outputDir = dir
+}
+
+func TestPlanTestSuite(t *testing.T) {
+ suite.Run(t, new(planTestSuite))
+}
+
+func (s *planTestSuite) TestPlan() {
+ defer utils.EnsureNoLeaks(s.T())
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
TabletType_: topodatapb.TabletType_PRIMARY,
SysVarEnabled: true,
TestBuilder: TestBuilder,
Env: vtenv.NewTestEnv(),
}
- testOutputTempDir := makeTestOutput(t)
- addPKs(t, vschemaWrapper.V, "user", []string{"user", "music"})
+ s.addPKs(vschemaWrapper.V, "user", []string{"user", "music"})
// You will notice that some tests expect user.Id instead of user.id.
// This is because we now pre-create vindex columns in the symbol
@@ -71,55 +89,53 @@ func TestPlan(t *testing.T) {
// the column is named as Id. This is to make sure that
// column names are case-preserved, but treated as
// case-insensitive even if they come from the vschema.
- testFile(t, "aggr_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "dml_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "from_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "filter_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "postprocess_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "select_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "symtab_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "unsupported_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "unknown_schema_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "vindex_func_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "wireup_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "memory_sort_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "use_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "set_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "union_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "large_union_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "transaction_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "lock_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "large_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "ddl_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "flush_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "show_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "stream_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "info_schema80_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "reference_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "vexplain_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "misc_cases.json", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "cte_cases.json", testOutputTempDir, vschemaWrapper, false)
+ s.testFile("aggr_cases.json", vschemaWrapper, false)
+ s.testFile("dml_cases.json", vschemaWrapper, false)
+ s.testFile("from_cases.json", vschemaWrapper, false)
+ s.testFile("filter_cases.json", vschemaWrapper, false)
+ s.testFile("postprocess_cases.json", vschemaWrapper, false)
+ s.testFile("select_cases.json", vschemaWrapper, false)
+ s.testFile("symtab_cases.json", vschemaWrapper, false)
+ s.testFile("unsupported_cases.json", vschemaWrapper, false)
+ s.testFile("unknown_schema_cases.json", vschemaWrapper, false)
+ s.testFile("vindex_func_cases.json", vschemaWrapper, false)
+ s.testFile("wireup_cases.json", vschemaWrapper, false)
+ s.testFile("memory_sort_cases.json", vschemaWrapper, false)
+ s.testFile("use_cases.json", vschemaWrapper, false)
+ s.testFile("set_cases.json", vschemaWrapper, false)
+ s.testFile("union_cases.json", vschemaWrapper, false)
+ s.testFile("large_union_cases.json", vschemaWrapper, false)
+ s.testFile("transaction_cases.json", vschemaWrapper, false)
+ s.testFile("lock_cases.json", vschemaWrapper, false)
+ s.testFile("large_cases.json", vschemaWrapper, false)
+ s.testFile("ddl_cases_no_default_keyspace.json", vschemaWrapper, false)
+ s.testFile("flush_cases_no_default_keyspace.json", vschemaWrapper, false)
+ s.testFile("show_cases_no_default_keyspace.json", vschemaWrapper, false)
+ s.testFile("stream_cases.json", vschemaWrapper, false)
+ s.testFile("info_schema80_cases.json", vschemaWrapper, false)
+ s.testFile("reference_cases.json", vschemaWrapper, false)
+ s.testFile("vexplain_cases.json", vschemaWrapper, false)
+ s.testFile("misc_cases.json", vschemaWrapper, false)
+ s.testFile("cte_cases.json", vschemaWrapper, false)
}
// TestForeignKeyPlanning tests the planning of foreign keys in a managed mode by Vitess.
-func TestForeignKeyPlanning(t *testing.T) {
- vschema := loadSchema(t, "vschemas/schema.json", true)
- setFks(t, vschema)
+func (s *planTestSuite) TestForeignKeyPlanning() {
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ s.setFks(vschema)
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
V: vschema,
TestBuilder: TestBuilder,
Env: vtenv.NewTestEnv(),
}
- testOutputTempDir := makeTestOutput(t)
-
- testFile(t, "foreignkey_cases.json", testOutputTempDir, vschemaWrapper, false)
+ s.testFile("foreignkey_cases.json", vschemaWrapper, false)
}
// TestForeignKeyChecksOn tests the planning when the session variable for foreign_key_checks is set to ON.
-func TestForeignKeyChecksOn(t *testing.T) {
- vschema := loadSchema(t, "vschemas/schema.json", true)
- setFks(t, vschema)
+func (s *planTestSuite) TestForeignKeyChecksOn() {
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ s.setFks(vschema)
fkChecksState := true
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
V: vschema,
@@ -128,15 +144,13 @@ func TestForeignKeyChecksOn(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testOutputTempDir := makeTestOutput(t)
-
- testFile(t, "foreignkey_checks_on_cases.json", testOutputTempDir, vschemaWrapper, false)
+ s.testFile("foreignkey_checks_on_cases.json", vschemaWrapper, false)
}
// TestForeignKeyChecksOff tests the planning when the session variable for foreign_key_checks is set to OFF.
-func TestForeignKeyChecksOff(t *testing.T) {
- vschema := loadSchema(t, "vschemas/schema.json", true)
- setFks(t, vschema)
+func (s *planTestSuite) TestForeignKeyChecksOff() {
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ s.setFks(vschema)
fkChecksState := false
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
V: vschema,
@@ -145,12 +159,10 @@ func TestForeignKeyChecksOff(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testOutputTempDir := makeTestOutput(t)
-
- testFile(t, "foreignkey_checks_off_cases.json", testOutputTempDir, vschemaWrapper, false)
+ s.testFile("foreignkey_checks_off_cases.json", vschemaWrapper, false)
}
-func setFks(t *testing.T, vschema *vindexes.VSchema) {
+func (s *planTestSuite) setFks(vschema *vindexes.VSchema) {
if vschema.Keyspaces["sharded_fk_allow"] != nil {
// FK from multicol_tbl2 referencing multicol_tbl1 that is shard scoped.
_ = vschema.AddForeignKey("sharded_fk_allow", "multicol_tbl2", createFkDefinition([]string{"colb", "cola", "x", "colc", "y"}, "multicol_tbl1", []string{"colb", "cola", "y", "colc", "x"}, sqlparser.Cascade, sqlparser.Cascade))
@@ -185,7 +197,7 @@ func setFks(t *testing.T, vschema *vindexes.VSchema) {
// FK from tbl_auth referencing tbl20 that is shard scoped of CASCADE types.
_ = vschema.AddForeignKey("sharded_fk_allow", "tbl_auth", createFkDefinition([]string{"id"}, "tbl20", []string{"col2"}, sqlparser.Cascade, sqlparser.Cascade))
- addPKs(t, vschema, "sharded_fk_allow", []string{"tbl1", "tbl2", "tbl3", "tbl4", "tbl5", "tbl6", "tbl7", "tbl9", "tbl10",
+ s.addPKs(vschema, "sharded_fk_allow", []string{"tbl1", "tbl2", "tbl3", "tbl4", "tbl5", "tbl6", "tbl7", "tbl9", "tbl10",
"multicol_tbl1", "multicol_tbl2", "tbl_auth", "tblrefDef", "tbl20"})
}
if vschema.Keyspaces["unsharded_fk_allow"] != nil {
@@ -225,86 +237,85 @@ func setFks(t *testing.T, vschema *vindexes.VSchema) {
_ = vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl9", sqlparser.Exprs{sqlparser.NewColName("bar"), sqlparser.NewColName("col9")})
_ = vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl8", sqlparser.Exprs{sqlparser.NewColName("col8")})
- addPKs(t, vschema, "unsharded_fk_allow", []string{"u_tbl1", "u_tbl2", "u_tbl3", "u_tbl4", "u_tbl5", "u_tbl6", "u_tbl7", "u_tbl8", "u_tbl9", "u_tbl10", "u_tbl11",
+ s.addPKs(vschema, "unsharded_fk_allow", []string{"u_tbl1", "u_tbl2", "u_tbl3", "u_tbl4", "u_tbl5", "u_tbl6", "u_tbl7", "u_tbl8", "u_tbl9", "u_tbl10", "u_tbl11",
"u_multicol_tbl1", "u_multicol_tbl2", "u_multicol_tbl3"})
}
}
-func addPKs(t *testing.T, vschema *vindexes.VSchema, ks string, tbls []string) {
+func (s *planTestSuite) addPKs(vschema *vindexes.VSchema, ks string, tbls []string) {
for _, tbl := range tbls {
- require.NoError(t,
+ require.NoError(s.T(),
vschema.AddPrimaryKey(ks, tbl, []string{"id"}))
}
}
-func TestSystemTables57(t *testing.T) {
+func (s *planTestSuite) TestSystemTables57() {
// first we move everything to use 5.7 logic
env, err := vtenv.New(vtenv.Options{
MySQLServerVersion: "5.7.9",
})
- require.NoError(t, err)
+ require.NoError(s.T(), err)
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Env: env,
}
- testOutputTempDir := makeTestOutput(t)
- testFile(t, "info_schema57_cases.json", testOutputTempDir, vschemaWrapper, false)
+ s.testFile("info_schema57_cases.json", vschemaWrapper, false)
}
-func TestSysVarSetDisabled(t *testing.T) {
+func (s *planTestSuite) TestSysVarSetDisabled() {
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
SysVarEnabled: false,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "set_sysvar_disabled_cases.json", makeTestOutput(t), vschemaWrapper, false)
+ s.testFile("set_sysvar_disabled_cases.json", vschemaWrapper, false)
}
-func TestViews(t *testing.T) {
+func (s *planTestSuite) TestViews() {
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
EnableViews: true,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "view_cases.json", makeTestOutput(t), vschemaWrapper, false)
+ s.testFile("view_cases.json", vschemaWrapper, false)
}
-func TestOne(t *testing.T) {
+func (s *planTestSuite) TestOne() {
reset := operators.EnableDebugPrinting()
defer reset()
- lv := loadSchema(t, "vschemas/schema.json", true)
- setFks(t, lv)
- addPKs(t, lv, "user", []string{"user", "music"})
+ lv := loadSchema(s.T(), "vschemas/schema.json", true)
+ s.setFks(lv)
+ s.addPKs(lv, "user", []string{"user", "music"})
vschema := &vschemawrapper.VSchemaWrapper{
V: lv,
TestBuilder: TestBuilder,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "onecase.json", "", vschema, false)
+ s.testFile("onecase.json", vschema, false)
}
-func TestOneTPCC(t *testing.T) {
+func (s *planTestSuite) TestOneTPCC() {
reset := operators.EnableDebugPrinting()
defer reset()
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/tpcc_schema.json", true),
+ V: loadSchema(s.T(), "vschemas/tpcc_schema.json", true),
Env: vtenv.NewTestEnv(),
}
- testFile(t, "onecase.json", "", vschema, false)
+ s.testFile("onecase.json", vschema, false)
}
-func TestOneWithMainAsDefault(t *testing.T) {
+func (s *planTestSuite) TestOneWithMainAsDefault() {
reset := operators.EnableDebugPrinting()
defer reset()
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -312,14 +323,14 @@ func TestOneWithMainAsDefault(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testFile(t, "onecase.json", "", vschema, false)
+ s.testFile("onecase.json", vschema, false)
}
-func TestOneWithSecondUserAsDefault(t *testing.T) {
+func (s *planTestSuite) TestOneWithSecondUserAsDefault() {
reset := operators.EnableDebugPrinting()
defer reset()
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "second_user",
Sharded: true,
@@ -327,14 +338,14 @@ func TestOneWithSecondUserAsDefault(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testFile(t, "onecase.json", "", vschema, false)
+ s.testFile("onecase.json", vschema, false)
}
-func TestOneWithUserAsDefault(t *testing.T) {
+func (s *planTestSuite) TestOneWithUserAsDefault() {
reset := operators.EnableDebugPrinting()
defer reset()
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "user",
Sharded: true,
@@ -342,75 +353,75 @@ func TestOneWithUserAsDefault(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testFile(t, "onecase.json", "", vschema, false)
+ s.testFile("onecase.json", vschema, false)
}
-func TestOneWithTPCHVSchema(t *testing.T) {
+func (s *planTestSuite) TestOneWithTPCHVSchema() {
reset := operators.EnableDebugPrinting()
defer reset()
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/tpch_schema.json", true),
+ V: loadSchema(s.T(), "vschemas/tpch_schema.json", true),
SysVarEnabled: true,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "onecase.json", "", vschema, false)
+ s.testFile("onecase.json", vschema, false)
}
-func TestOneWith57Version(t *testing.T) {
+func (s *planTestSuite) TestOneWith57Version() {
reset := operators.EnableDebugPrinting()
defer reset()
// first we move everything to use 5.7 logic
env, err := vtenv.New(vtenv.Options{
MySQLServerVersion: "5.7.9",
})
- require.NoError(t, err)
+ require.NoError(s.T(), err)
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Env: env,
}
- testFile(t, "onecase.json", "", vschema, false)
+ s.testFile("onecase.json", vschema, false)
}
-func TestRubyOnRailsQueries(t *testing.T) {
+func (s *planTestSuite) TestRubyOnRailsQueries() {
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/rails_schema.json", true),
+ V: loadSchema(s.T(), "vschemas/rails_schema.json", true),
SysVarEnabled: true,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "rails_cases.json", makeTestOutput(t), vschemaWrapper, false)
+ s.testFile("rails_cases.json", vschemaWrapper, false)
}
-func TestOLTP(t *testing.T) {
+func (s *planTestSuite) TestOLTP() {
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/oltp_schema.json", true),
+ V: loadSchema(s.T(), "vschemas/oltp_schema.json", true),
SysVarEnabled: true,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "oltp_cases.json", makeTestOutput(t), vschemaWrapper, false)
+ s.testFile("oltp_cases.json", vschemaWrapper, false)
}
-func TestTPCC(t *testing.T) {
+func (s *planTestSuite) TestTPCC() {
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/tpcc_schema.json", true),
+ V: loadSchema(s.T(), "vschemas/tpcc_schema.json", true),
SysVarEnabled: true,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "tpcc_cases.json", makeTestOutput(t), vschemaWrapper, false)
+ s.testFile("tpcc_cases.json", vschemaWrapper, false)
}
-func TestTPCH(t *testing.T) {
+func (s *planTestSuite) TestTPCH() {
vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/tpch_schema.json", true),
+ V: loadSchema(s.T(), "vschemas/tpch_schema.json", true),
SysVarEnabled: true,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "tpch_cases.json", makeTestOutput(t), vschemaWrapper, false)
+ s.testFile("tpch_cases.json", vschemaWrapper, false)
}
func BenchmarkOLTP(b *testing.B) {
@@ -441,9 +452,9 @@ func benchmarkWorkload(b *testing.B, name string) {
}
}
-func TestBypassPlanningShardTargetFromFile(t *testing.T) {
+func (s *planTestSuite) TestBypassPlanningShardTargetFromFile() {
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -453,14 +464,14 @@ func TestBypassPlanningShardTargetFromFile(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testFile(t, "bypass_shard_cases.json", makeTestOutput(t), vschema, false)
+ s.testFile("bypass_shard_cases.json", vschema, false)
}
-func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) {
+func (s *planTestSuite) TestBypassPlanningKeyrangeTargetFromFile() {
keyRange, _ := key.ParseShardingSpec("-")
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -470,15 +481,15 @@ func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testFile(t, "bypass_keyrange_cases.json", makeTestOutput(t), vschema, false)
+ s.testFile("bypass_keyrange_cases.json", vschema, false)
}
-func TestWithDefaultKeyspaceFromFile(t *testing.T) {
+func (s *planTestSuite) TestWithDefaultKeyspaceFromFile() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// We are testing this separately so we can set a default keyspace
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -498,21 +509,20 @@ func TestWithDefaultKeyspaceFromFile(t *testing.T) {
}
return ki.SidecarDbName, nil
})
- require.True(t, created)
+ require.True(s.T(), created)
- testOutputTempDir := makeTestOutput(t)
- testFile(t, "alterVschema_cases.json", testOutputTempDir, vschema, false)
- testFile(t, "ddl_cases.json", testOutputTempDir, vschema, false)
- testFile(t, "migration_cases.json", testOutputTempDir, vschema, false)
- testFile(t, "flush_cases.json", testOutputTempDir, vschema, false)
- testFile(t, "show_cases.json", testOutputTempDir, vschema, false)
- testFile(t, "call_cases.json", testOutputTempDir, vschema, false)
+ s.testFile("alterVschema_cases.json", vschema, false)
+ s.testFile("ddl_cases.json", vschema, false)
+ s.testFile("migration_cases.json", vschema, false)
+ s.testFile("flush_cases.json", vschema, false)
+ s.testFile("show_cases.json", vschema, false)
+ s.testFile("call_cases.json", vschema, false)
}
-func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) {
+func (s *planTestSuite) TestWithDefaultKeyspaceFromFileSharded() {
// We are testing this separately so we can set a default keyspace
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "second_user",
Sharded: true,
@@ -521,14 +531,13 @@ func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testOutputTempDir := makeTestOutput(t)
- testFile(t, "select_cases_with_default.json", testOutputTempDir, vschema, false)
+ s.testFile("select_cases_with_default.json", vschema, false)
}
-func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) {
+func (s *planTestSuite) TestWithUserDefaultKeyspaceFromFileSharded() {
// We are testing this separately so we can set a default keyspace
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "user",
Sharded: true,
@@ -537,26 +546,25 @@ func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testOutputTempDir := makeTestOutput(t)
- testFile(t, "select_cases_with_user_as_default.json", testOutputTempDir, vschema, false)
+ s.testFile("select_cases_with_user_as_default.json", vschema, false)
}
-func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) {
+func (s *planTestSuite) TestWithSystemSchemaAsDefaultKeyspace() {
// We are testing this separately so we can set a default keyspace
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{Name: "information_schema"},
TabletType_: topodatapb.TabletType_PRIMARY,
Env: vtenv.NewTestEnv(),
}
- testFile(t, "sysschema_default.json", makeTestOutput(t), vschema, false)
+ s.testFile("sysschema_default.json", vschema, false)
}
-func TestOtherPlanningFromFile(t *testing.T) {
+func (s *planTestSuite) TestOtherPlanningFromFile() {
// We are testing this separately so we can set a default keyspace
vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
+ V: loadSchema(s.T(), "vschemas/schema.json", true),
Keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -565,9 +573,8 @@ func TestOtherPlanningFromFile(t *testing.T) {
Env: vtenv.NewTestEnv(),
}
- testOutputTempDir := makeTestOutput(t)
- testFile(t, "other_read_cases.json", testOutputTempDir, vschema, false)
- testFile(t, "other_admin_cases.json", testOutputTempDir, vschema, false)
+ s.testFile("other_read_cases.json", vschema, false)
+ s.testFile("other_admin_cases.json", vschema, false)
}
func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSchema {
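
The suite conversion above is mechanical: each former top-level Test function becomes a method on planTestSuite, t *testing.T becomes s.T(), and the per-test makeTestOutput temp dir gives way to a shared s.outputDir. A minimal sketch of the scaffolding this assumes (testify's suite package; the SetupTest wiring and runner name are illustrative, not copied from the PR):

    package planbuilder

    import (
        "testing"

        "github.com/stretchr/testify/suite"
    )

    // Illustrative only: the real suite type is defined earlier in the file.
    // testify discovers and runs every (s *planTestSuite) TestXxx method.
    type planTestSuite struct {
        suite.Suite
        outputDir string // consulted by testFile when a subtest fails
    }

    func (s *planTestSuite) SetupTest() {
        s.outputDir = s.T().TempDir() // assumption: fresh dir per test
    }

    func TestPlanTestSuite(t *testing.T) {
        suite.Run(t, new(planTestSuite))
    }
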
@@ -630,10 +637,11 @@ type (
}
)
-func testFile(t *testing.T, filename, tempDir string, vschema *vschemawrapper.VSchemaWrapper, render bool) {
+func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchemaWrapper, render bool) {
opts := jsondiff.DefaultConsoleOptions()
- t.Run(filename, func(t *testing.T) {
+ s.T().Run(filename, func(t *testing.T) {
+ failed := false
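+		// failed records whether any subtest failed; it gates writing the expected-output file below.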
var expected []planTest
for _, tcase := range readJSONTests(filename) {
testName := tcase.Comment
@@ -655,6 +663,11 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemawrapper.VS
// - produces a different plan than expected
// - fails to produce a plan
t.Run(testName, func(t *testing.T) {
+ defer func() {
+ if t.Failed() {
+ failed = true
+ }
+ }()
compare, s := jsondiff.Compare(tcase.Plan, []byte(out), &opts)
if compare != jsondiff.FullMatch {
message := fmt.Sprintf("%s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.Plan, out)
@@ -670,9 +683,9 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemawrapper.VS
})
expected = append(expected, current)
}
- if tempDir != "" {
+ if s.outputDir != "" && failed {
name := strings.TrimSuffix(filename, filepath.Ext(filename))
- name = filepath.Join(tempDir, name+".json")
+ name = filepath.Join(s.outputDir, name+".json")
file, err := os.Create(name)
require.NoError(t, err)
enc := json.NewEncoder(file)
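
With the suite in place, testFile only writes the expected-plan file when something actually failed, instead of always dumping into a temp dir. A condensed sketch of that tail logic (writeExpectedOnFailure is a hypothetical helper; the real code inlines this, and the encoder settings are assumptions):

    package planbuilder

    import (
        "encoding/json"
        "os"
        "path/filepath"
        "strings"
    )

    // writeExpectedOnFailure serializes the collected expected plans to
    // <outputDir>/<name>.json, but only when an output directory is
    // configured and at least one subtest failed.
    func writeExpectedOnFailure(outputDir, filename string, failed bool, expected []planTest) error {
        if outputDir == "" || !failed {
            return nil
        }
        name := strings.TrimSuffix(filename, filepath.Ext(filename))
        file, err := os.Create(filepath.Join(outputDir, name+".json"))
        if err != nil {
            return err
        }
        defer file.Close()
        enc := json.NewEncoder(file)
        enc.SetIndent("", "  ")
        return enc.Encode(expected)
    }
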
diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context.go b/go/vt/vtgate/planbuilder/plancontext/planning_context.go
index 49039ddd347..90a6bdac6f8 100644
--- a/go/vt/vtgate/planbuilder/plancontext/planning_context.go
+++ b/go/vt/vtgate/planbuilder/plancontext/planning_context.go
@@ -20,6 +20,7 @@ import (
querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
@@ -57,6 +58,10 @@ type PlanningContext struct {
// Statement contains the originally parsed statement
Statement sqlparser.Statement
+
+ // OuterTables contains the tables that sit on the outer side of a join
+ // in the current query. It is used to mark their columns as nullable.
+ OuterTables semantics.TableSet
}
// CreatePlanningContext initializes a new PlanningContext with the given parameters.
@@ -188,3 +193,32 @@ func (ctx *PlanningContext) execOnJoinPredicateEqual(joinPred sqlparser.Expr, fn
}
return false
}
+
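+// RewriteDerivedTableExpression rewrites the given expression using the derived
+// table's info, and keeps the join-predicate bookkeeping consistent: if the
+// original expression was tracked as a join predicate, the rewritten form is
+// registered alongside it so later lookups still match.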
+func (ctx *PlanningContext) RewriteDerivedTableExpression(expr sqlparser.Expr, tableInfo semantics.TableInfo) sqlparser.Expr {
+ modifiedExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo)
+ for key, exprs := range ctx.joinPredicates {
+ for _, rhsExpr := range exprs {
+ if ctx.SemTable.EqualsExpr(expr, rhsExpr) {
+ ctx.joinPredicates[key] = append(ctx.joinPredicates[key], modifiedExpr)
+ return modifiedExpr
+ }
+ }
+ }
+ return modifiedExpr
+}
+
+// TypeForExpr returns the type of the given expression, with nullable set if the expression is from an outer table.
+func (ctx *PlanningContext) TypeForExpr(e sqlparser.Expr) (evalengine.Type, bool) {
+ t, found := ctx.SemTable.TypeForExpr(e)
+ if !found {
+ return t, found
+ }
+ deps := ctx.SemTable.RecursiveDeps(e)
+ // If the expression is from an outer table, it should be nullable.
+ // Some expressions that depend on the outer side can never return NULL,
+ // but it's better to be conservative here.
+ if deps.IsOverlapping(ctx.OuterTables) {
+ t.SetNullability(true)
+ }
+ return t, true
+}
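
The nullability widening in TypeForExpr is the core of the planner fix: a column declared NOT NULL must still be treated as nullable once it comes from the outer side of an outer join, because the joined row may simply not exist. A self-contained sketch of the rule, built only from APIs that appear elsewhere in this diff:

    package main

    import (
        "fmt"

        "vitess.io/vitess/go/mysql/collations"
        "vitess.io/vitess/go/sqltypes"
        "vitess.io/vitess/go/vt/vtgate/evalengine"
        "vitess.io/vitess/go/vt/vtgate/semantics"
    )

    func main() {
        outer := semantics.SingleTableSet(1) // t2, the outer side of a LEFT JOIN
        deps := semantics.SingleTableSet(1)  // the column resolves to t2

        typ := evalengine.NewType(sqltypes.Int64, collations.Unknown)
        typ.SetNullability(false) // declared NOT NULL in the schema

        // Same rule as PlanningContext.TypeForExpr: widen to nullable when
        // the expression's dependencies overlap the outer tables.
        if deps.IsOverlapping(outer) {
            typ.SetNullability(true)
        }
        fmt.Println(typ.Nullable()) // true: the outer row may be missing
    }
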
diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context_test.go b/go/vt/vtgate/planbuilder/plancontext/planning_context_test.go
new file mode 100644
index 00000000000..70faa61737d
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/plancontext/planning_context_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plancontext
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+)
+
+func TestOuterTableNullability(t *testing.T) {
+ // Tests that columns from outer tables are nullable,
+ // even though the semantic state says that they are not nullable.
+ // This is because the outer table may not have a matching row.
+ // All columns are marked as NOT NULL in the schema.
+ query := "select * from t1 left join t2 on t1.a = t2.a where t1.a+t2.a/abs(t2.boing)"
+ ctx, columns := prepareContextAndFindColumns(t, query)
+
+ // Check if the columns are correctly marked as nullable.
+ for _, col := range columns {
+ colName := "column: " + sqlparser.String(col)
+ t.Run(colName, func(t *testing.T) {
+ // Extract the column type from the context and the semantic state.
+ // The context should mark the column as nullable.
+ ctxType, found := ctx.TypeForExpr(col)
+ require.True(t, found, colName)
+ stType, found := ctx.SemTable.TypeForExpr(col)
+ require.True(t, found, colName)
+ ctxNullable := ctxType.Nullable()
+ stNullable := stType.Nullable()
+
+ switch col.Qualifier.Name.String() {
+ case "t1":
+ assert.False(t, ctxNullable, colName)
+ assert.False(t, stNullable, colName)
+ case "t2":
+ assert.True(t, ctxNullable, colName)
+
+ // The semantic state says that the column is not nullable. Don't trust it.
+ assert.False(t, stNullable, colName)
+ }
+ })
+ }
+}
+
+func prepareContextAndFindColumns(t *testing.T, query string) (ctx *PlanningContext, columns []*sqlparser.ColName) {
+ parser := sqlparser.NewTestParser()
+ ast, err := parser.Parse(query)
+ require.NoError(t, err)
+ semTable := semantics.EmptySemTable()
+ t1 := semantics.SingleTableSet(0)
+ t2 := semantics.SingleTableSet(1)
+ stmt := ast.(*sqlparser.Select)
+ expr := stmt.Where.Expr
+
+ // Instead of using the semantic analysis, we manually set the types for the columns.
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ col, ok := node.(*sqlparser.ColName)
+ if !ok {
+ return true, nil
+ }
+
+ switch col.Qualifier.Name.String() {
+ case "t1":
+ semTable.Recursive[col] = t1
+ case "t2":
+ semTable.Recursive[col] = t2
+ }
+
+ intNotNull := evalengine.NewType(sqltypes.Int64, collations.Unknown)
+ intNotNull.SetNullability(false)
+ semTable.ExprTypes[col] = intNotNull
+ columns = append(columns, col)
+ return false, nil
+ }, nil, expr)
+
+ ctx = &PlanningContext{
+ SemTable: semTable,
+ joinPredicates: map[sqlparser.Expr][]sqlparser.Expr{},
+ skipPredicates: map[sqlparser.Expr]any{},
+ ReservedArguments: map[sqlparser.Expr]string{},
+ Statement: stmt,
+ OuterTables: t2, // t2 is the outer table.
+ }
+ return
+}
diff --git a/go/vt/vtgate/planbuilder/predicate_rewrite_test.go b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go
index ba1d60ff234..240c7ff3581 100644
--- a/go/vt/vtgate/planbuilder/predicate_rewrite_test.go
+++ b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go
@@ -27,7 +27,6 @@ import (
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql/collations"
-
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtenv"
diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go
index bf6820b7489..77f20be40f9 100644
--- a/go/vt/vtgate/planbuilder/set.go
+++ b/go/vt/vtgate/planbuilder/set.go
@@ -21,18 +21,14 @@ import (
"strconv"
"strings"
- "vitess.io/vitess/go/vt/sysvars"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- "vitess.io/vitess/go/vt/vtgate/evalengine"
-
- "vitess.io/vitess/go/vt/vtgate/vindexes"
-
- "vitess.io/vitess/go/vt/vterrors"
-
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/sysvars"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
)
type (
diff --git a/go/vt/vtgate/planbuilder/show_test.go b/go/vt/vtgate/planbuilder/show_test.go
index 931c914149d..bfdb9a623a0 100644
--- a/go/vt/vtgate/planbuilder/show_test.go
+++ b/go/vt/vtgate/planbuilder/show_test.go
@@ -21,15 +21,13 @@ import (
"fmt"
"testing"
- "vitess.io/vitess/go/test/vschemawrapper"
- "vitess.io/vitess/go/vt/vtenv"
-
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql/collations"
-
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/test/vschemawrapper"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtenv"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
index f7e556956e3..f1555686230 100644
--- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
@@ -716,6 +716,58 @@
]
}
},
+ {
+ "comment": "Aggregation with derived table",
+ "query": "select u.id, u.name, t.num_segments from (select id, count(*) as num_segments from user group by 1 order by 2 desc limit 20) t join unsharded u on u.id = t.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, u.name, t.num_segments from (select id, count(*) as num_segments from user group by 1 order by 2 desc limit 20) t join unsharded u on u.id = t.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:0",
+ "JoinVars": {
+ "t_id": 1
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "20",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.num_segments, t.id from (select id, count(*) as num_segments from `user` where 1 != 1 group by id) as t where 1 != 1",
+ "OrderBy": "0 DESC",
+ "Query": "select t.num_segments, t.id from (select id, count(*) as num_segments from `user` group by id) as t order by t.num_segments desc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u.id, u.`name` from unsharded as u where 1 != 1",
+ "Query": "select u.id, u.`name` from unsharded as u where u.id = :t_id",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
{
"comment": "scatter aggregate multiple group by (numbers)",
"query": "select a, b, count(*) from user group by 2, 1",
@@ -1691,7 +1743,7 @@
"Sharded": true
},
"FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id limit 1",
"Table": "user_extra",
"Values": [
"3"
@@ -2546,15 +2598,21 @@
},
{
"InputName": "SubQuery",
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.bar = :user_apa",
- "Table": "user_extra"
+ "OperatorType": "Limit",
+ "Count": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.bar = :user_apa limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
}
]
}
@@ -3535,7 +3593,7 @@
},
"FieldQuery": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where 1 != 1) as x where 1 != 1",
"OrderBy": "(1|3) ASC",
- "Query": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where val2 < 4) as x order by `user`.val1 asc limit :__upper_limit",
+ "Query": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where val2 < 4) as x order by x.val1 asc limit :__upper_limit",
"Table": "`user`"
}
]
@@ -6696,6 +6754,51 @@
]
}
},
+ {
+ "comment": "Aggregation over a ORDER BY/LIMIT inside a derived table",
+ "query": "SELECT COUNT(*) FROM (SELECT 1 AS one FROM `user` WHERE `user`.`is_not_deleted` = true ORDER BY id DESC LIMIT 25 OFFSET 0) subquery_for_count",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(*) FROM (SELECT 1 AS one FROM `user` WHERE `user`.`is_not_deleted` = true ORDER BY id DESC LIMIT 25 OFFSET 0) subquery_for_count",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "25",
+ "Offset": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select subquery_for_count.one, subquery_for_count.id, 1, weight_string(subquery_for_count.id) from (select 1 as one, id from `user` where 1 != 1) as subquery_for_count where 1 != 1",
+ "OrderBy": "(1|3) DESC",
+ "Query": "select subquery_for_count.one, subquery_for_count.id, 1, weight_string(subquery_for_count.id) from (select 1 as one, id from `user` where `user`.is_not_deleted = true) as subquery_for_count order by subquery_for_count.id desc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
{
"comment": "sharded subquery inside group_concat multi-column aggregation function on a sharded table on same vindex value",
"query": "select max((select group_concat(col1, col2) from user where id = 1)) from user where id = 1",
diff --git a/go/vt/vtgate/planbuilder/testdata/cte_cases.json b/go/vt/vtgate/planbuilder/testdata/cte_cases.json
index 0d7d9020ac2..51b130d25cc 100644
--- a/go/vt/vtgate/planbuilder/testdata/cte_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/cte_cases.json
@@ -348,7 +348,7 @@
},
"FieldQuery": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where 1 != 1) as x where 1 != 1",
"OrderBy": "(1|3) ASC",
- "Query": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where val2 < 4) as x order by `user`.val1 asc limit :__upper_limit",
+ "Query": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where val2 < 4) as x order by x.val1 asc limit :__upper_limit",
"Table": "`user`"
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
index aee0bac3365..272bf694d03 100644
--- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
@@ -870,6 +870,29 @@
]
}
},
+ {
+ "comment": "Merging subqueries should remove keyspace from query",
+ "query": "select u.id from user.user as u where not exists (select 1 from user.user_extra as ue where u.id = ue.user_id)",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user.user as u where not exists (select 1 from user.user_extra as ue where u.id = ue.user_id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where not exists (select 1 from user_extra as ue where u.id = ue.user_id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
{
"comment": "Single table equality route with unsigned value",
"query": "select id from user where name = 18446744073709551615",
@@ -1991,15 +2014,21 @@
"Inputs": [
{
"InputName": "SubQuery",
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
+ "OperatorType": "Limit",
+ "Count": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
},
{
"InputName": "Outer",
@@ -2831,7 +2860,7 @@
"Sharded": true
},
"FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = 5",
+ "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
"Table": "`user`",
"Values": [
"5"
@@ -4288,7 +4317,7 @@
"Sharded": false
},
"FieldQuery": "select 1 from unsharded as u2 where 1 != 1",
- "Query": "select 1 from unsharded as u2 where u2.baz = :u1_bar",
+ "Query": "select 1 from unsharded as u2 where u2.baz = :u1_bar limit 1",
"Table": "unsharded"
}
]
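
The new "Merging subqueries should remove keyspace from query" case pins down a subtle requirement: once the NOT EXISTS subquery merges into the same route as its outer query, the user. qualifier must be stripped, since the database a shard actually serves is not named after the keyspace. A sketch of that qualifier rewrite (expressed here with sqlparser.Rewrite; the planner performs the equivalent as part of route merging):

    package main

    import (
        "fmt"

        "vitess.io/vitess/go/vt/sqlparser"
    )

    func main() {
        parser := sqlparser.NewTestParser()
        stmt, err := parser.Parse("select 1 from user.user_extra as ue where ue.user_id = 3")
        if err != nil {
            panic(err)
        }
        // Drop the keyspace qualifier from every table reference.
        result := sqlparser.Rewrite(stmt, func(c *sqlparser.Cursor) bool {
            if tbl, ok := c.Node().(sqlparser.TableName); ok && !tbl.Qualifier.IsEmpty() {
                c.Replace(sqlparser.TableName{Name: tbl.Name})
            }
            return true
        }, nil)
        fmt.Println(sqlparser.String(result))
        // prints: select 1 from user_extra as ue where ue.user_id = 3
    }
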
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json
index 6433c06ed89..86753825e42 100644
--- a/go/vt/vtgate/planbuilder/testdata/from_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json
@@ -497,6 +497,30 @@
]
}
},
+ {
+ "comment": "three table join with join predicate touching all tables",
+ "query": "select 42 from user u join user_extra ue on u.id = ue.user_id join music m on m.user_id = u.id where u.foo or m.foo or ue.foo",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42 from user u join user_extra ue on u.id = ue.user_id join music m on m.user_id = u.id where u.foo or m.foo or ue.foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 42 from `user` as u, user_extra as ue, music as m where 1 != 1",
+ "Query": "select 42 from `user` as u, user_extra as ue, music as m where u.id = ue.user_id and m.user_id = u.id and (u.foo or m.foo or ue.foo)",
+ "Table": "`user`, music, user_extra"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
{
"comment": "Left join, multi-chunk",
"query": "select u.col from user u left join unsharded m on u.a = m.b",
diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json
index 91755159b67..397d9ce6046 100644
--- a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json
@@ -946,31 +946,37 @@
"Inputs": [
{
"InputName": "SubQuery",
- "OperatorType": "Concatenate",
+ "OperatorType": "Limit",
+ "Count": "1",
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name1 /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */",
- "SysTableTableName": "[table_name1:'Music']",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name2 /* VARCHAR */ limit 1",
- "SysTableTableName": "[table_name2:'user']",
- "Table": "information_schema.views"
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name1 /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ limit :__upper_limit",
+ "SysTableTableName": "[table_name1:'Music']",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name2 /* VARCHAR */ limit :__upper_limit",
+ "SysTableTableName": "[table_name2:'user']",
+ "Table": "information_schema.views"
+ }
+ ]
}
]
},
diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json
index 251457e1994..626bf0d505a 100644
--- a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json
@@ -1011,31 +1011,37 @@
"Inputs": [
{
"InputName": "SubQuery",
- "OperatorType": "Concatenate",
+ "OperatorType": "Limit",
+ "Count": "1",
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name1 /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */",
- "SysTableTableName": "[table_name1:'Music']",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name2 /* VARCHAR */ limit 1",
- "SysTableTableName": "[table_name2:'user']",
- "Table": "information_schema.views"
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name1 /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ limit :__upper_limit",
+ "SysTableTableName": "[table_name1:'Music']",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name2 /* VARCHAR */ limit :__upper_limit",
+ "SysTableTableName": "[table_name2:'user']",
+ "Table": "information_schema.views"
+ }
+ ]
}
]
},
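
The reshaped info_schema plans hoist the LIMIT above the Concatenate while still pushing limit :__upper_limit into each branch: every route stops fetching early, and the parent Limit enforces the exact bound across branches. Conceptually, in plain Go with no Vitess types:

    package main

    import "fmt"

    // limitConcat models a Limit operator over a Concatenate: each input is
    // already capped at `limit` rows by its own route (via the :__upper_limit
    // bind variable), and the parent enforces the final bound overall.
    func limitConcat(limit int, inputs ...[]string) []string {
        var out []string
        for _, rows := range inputs {
            for _, row := range rows {
                if len(out) == limit {
                    return out
                }
                out = append(out, row)
            }
        }
        return out
    }

    func main() {
        tables := []string{"found"} // rows from information_schema.`tables`
        views := []string{"found"}  // rows from information_schema.views
        fmt.Println(limitConcat(1, tables, views)) // [found]
    }
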
diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
index 4a879997925..8390dde80bc 100644
--- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
@@ -147,7 +147,7 @@
},
"FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
"OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc limit :__upper_limit",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
"Table": "`user`"
}
]
@@ -288,49 +288,59 @@
"QueryType": "SELECT",
"Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
"Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(2|3) ASC",
- "ResultColumns": 3,
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1,
+ 2
+ ],
"Inputs": [
{
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,R:1",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(2|3) ASC",
+ "ResultColumns": 3,
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "1"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,R:1",
+ "JoinVars": {
+ "user_id": 2
},
- "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "1"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
}
]
}
@@ -349,49 +359,59 @@
"QueryType": "SELECT",
"Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
"Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
- "ResultColumns": 3,
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1,
+ 2
+ ],
"Inputs": [
{
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1,L:3",
- "JoinVars": {
- "user_id": 4
- },
- "TableName": "`user`_music",
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
+ "ResultColumns": 3,
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1,L:3",
+ "JoinVars": {
+ "user_id": 4
},
- "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "1"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "1"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
}
]
}
diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json
index 8b67623186a..2258bcd768c 100644
--- a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json
@@ -146,5 +146,26 @@
"user.music"
]
}
+ },
+ {
+ "comment": "describe info_schema table",
+ "query": "describe information_schema.administrable_role_authorizations",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "describe information_schema.administrable_role_authorizations",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "explain information_schema.administrable_role_authorizations",
+ "SingleShardOnly": true
+ },
+ "TablesUsed": [
+ "main.administrable_role_authorizations"
+ ]
+ }
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json
index cf13e92d4dc..f502e162705 100644
--- a/go/vt/vtgate/planbuilder/testdata/select_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json
@@ -736,6 +736,33 @@
]
}
},
+ {
+ "comment": "subquery with an aggregation in order by that can be merged into a single route",
+ "query": "select col, trim((select user_name from user where id = 3)) val from user_extra where user_id = 3 group by col order by val",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, trim((select user_name from user where id = 3)) val from user_extra where user_id = 3 group by col order by val",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, trim((select user_name from `user` where 1 != 1)) as val from user_extra where 1 != 1 group by col",
+ "Query": "select col, trim((select user_name from `user` where id = 3)) as val from user_extra where user_id = 3 group by col order by trim((select `user`.user_name from `user` where `user`.id = 3)) asc",
+ "Table": "user_extra",
+ "Values": [
+ "3"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
{
"comment": "Jumbled references",
"query": "select user.col, user_extra.id, user.col2 from user join user_extra",
@@ -2113,7 +2140,7 @@
}
},
{
- "comment": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "comment": "ORDER BY subquery",
"query": "select (select col from user limit 1) as a from user join user_extra order by a",
"plan": {
"QueryType": "SELECT",
@@ -2157,9 +2184,8 @@
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select :__sq1 as __sq1, weight_string(:__sq1) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select :__sq1 as __sq1, weight_string(:__sq1) from `user` order by __sq1 asc",
+ "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
+ "Query": "select :__sq1 as a from `user`",
"Table": "`user`"
}
]
@@ -2290,7 +2316,7 @@
"Sharded": true
},
"FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id limit 1",
"Table": "user_extra",
"Values": [
"3"
@@ -2348,7 +2374,7 @@
"Sharded": true
},
"FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id limit 1",
"Table": "user_extra",
"Values": [
"3"
@@ -2412,15 +2438,21 @@
},
{
"InputName": "SubQuery",
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col",
- "Table": "user_extra"
+ "OperatorType": "Limit",
+ "Count": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
}
]
}
@@ -2465,15 +2497,21 @@
},
{
"InputName": "SubQuery",
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col",
- "Table": "user_extra"
+ "OperatorType": "Limit",
+ "Count": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
}
]
}
@@ -4943,5 +4981,27 @@
"user.user"
]
}
+ },
+ {
+ "comment": "join with derived table with alias and join condition - merge into route",
+ "query": "select 1 from user join (select id as uid from user) as t where t.uid = user.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user join (select id as uid from user) as t where t.uid = user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from (select id as uid from `user` where 1 != 1) as t, `user` where 1 != 1",
+ "Query": "select 1 from (select id as uid from `user`) as t, `user` where t.uid = `user`.id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json
index 609285c4bfe..5b3404cc895 100644
--- a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json
@@ -252,7 +252,7 @@
"Sharded": true
},
"FieldQuery": "select 1 from lineitem where 1 != 1",
- "Query": "select 1 from lineitem where l_orderkey = :o_orderkey and l_commitdate < l_receiptdate",
+ "Query": "select 1 from lineitem where l_orderkey = :o_orderkey and l_commitdate < l_receiptdate limit 1",
"Table": "lineitem"
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
index 10cf6b84791..b66ddd79ad5 100644
--- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
@@ -49,6 +49,11 @@
"query": "update user set id = 1 where id = 1",
"plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: user_index"
},
+ {
+ "comment": "subquery with an aggregation in order by that cannot be merged into a single route",
+ "query": "select col, trim((select user_name from user where col = 'a')) val from user_extra where user_id = 3 group by col order by val",
+ "plan": "VT12001: unsupported: subquery with aggregation in order by"
+ },
{
"comment": "update change in multicol vindex column",
"query": "update multicol_tbl set colc = 5, colb = 4 where cola = 1 and colb = 2",
diff --git a/go/vt/vtgate/planbuilder/vexplain.go b/go/vt/vtgate/planbuilder/vexplain.go
index 7b200fb2e09..3d2b94a791b 100644
--- a/go/vt/vtgate/planbuilder/vexplain.go
+++ b/go/vt/vtgate/planbuilder/vexplain.go
@@ -29,6 +29,7 @@ import (
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
)
func buildVExplainPlan(ctx context.Context, vexplainStmt *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
@@ -42,24 +43,37 @@ func buildVExplainPlan(ctx context.Context, vexplainStmt *sqlparser.VExplainStmt
}
func explainTabPlan(explain *sqlparser.ExplainTab, vschema plancontext.VSchema) (*planResult, error) {
- _, _, ks, _, destination, err := vschema.FindTableOrVindex(explain.Table)
- if err != nil {
- return nil, err
+ var keyspace *vindexes.Keyspace
+ var destination key.Destination
+
+ if sqlparser.SystemSchema(explain.Table.Qualifier.String()) {
+ var err error
+ keyspace, err = vschema.AnyKeyspace()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ var err error
+ var ks string
+ _, _, ks, _, destination, err = vschema.FindTableOrVindex(explain.Table)
+ if err != nil {
+ return nil, err
+ }
+ explain.Table.Qualifier = sqlparser.NewIdentifierCS("")
+
+ keyspace, err = vschema.FindKeyspace(ks)
+ if err != nil {
+ return nil, err
+ }
+ if keyspace == nil {
+ return nil, vterrors.VT14004(ks)
+ }
}
- explain.Table.Qualifier = sqlparser.NewIdentifierCS("")
if destination == nil {
destination = key.DestinationAnyShard{}
}
- keyspace, err := vschema.FindKeyspace(ks)
- if err != nil {
- return nil, err
- }
- if keyspace == nil {
- return nil, vterrors.VT14004(ks)
- }
-
return newPlanResult(&engine.Send{
Keyspace: keyspace,
TargetDestination: destination,
@@ -114,7 +128,6 @@ func buildExplainStmtPlan(stmt sqlparser.Statement, reservedVars *sqlparser.Rese
default:
return buildOtherReadAndAdmin(sqlparser.String(explain), vschema)
}
-
}
func explainPlan(explain *sqlparser.ExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) {
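
explainTabPlan now special-cases system schemas: describe information_schema.x cannot be resolved through the vschema, so the statement is sent to any keyspace with its qualifier left intact; that is exactly what the new "describe info_schema table" fixture in other_read_cases.json exercises. The gate is sqlparser.SystemSchema:

    package main

    import (
        "fmt"

        "vitess.io/vitess/go/vt/sqlparser"
    )

    func main() {
        // SystemSchema reports whether a qualifier names a built-in schema;
        // explainTabPlan uses it to choose AnyKeyspace() over a vschema lookup.
        for _, q := range []string{"information_schema", "performance_schema", "commerce"} {
            fmt.Printf("%s -> %v\n", q, sqlparser.SystemSchema(q))
        }
    }
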
diff --git a/go/vt/vtgate/planbuilder/vindex_func.go b/go/vt/vtgate/planbuilder/vindex_func.go
index abfd2d1d9b3..d3231249639 100644
--- a/go/vt/vtgate/planbuilder/vindex_func.go
+++ b/go/vt/vtgate/planbuilder/vindex_func.go
@@ -20,13 +20,11 @@ import (
"fmt"
"vitess.io/vitess/go/mysql/collations"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-
- "vitess.io/vitess/go/vt/vterrors"
-
querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
)
var _ logicalPlan = (*vindexFunc)(nil)
diff --git a/go/vt/vtgate/planbuilder/vstream.go b/go/vt/vtgate/planbuilder/vstream.go
index fe07a4a021b..19713a6ffa3 100644
--- a/go/vt/vtgate/planbuilder/vstream.go
+++ b/go/vt/vtgate/planbuilder/vstream.go
@@ -20,13 +20,12 @@ import (
"strconv"
"strings"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
"vitess.io/vitess/go/vt/key"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
)
const defaultLimit = 100
diff --git a/go/vt/vtgate/semantics/semantic_state.go b/go/vt/vtgate/semantics/semantic_state.go
index 76a51efd160..9a8721108b3 100644
--- a/go/vt/vtgate/semantics/semantic_state.go
+++ b/go/vt/vtgate/semantics/semantic_state.go
@@ -614,6 +614,7 @@ func (st *SemTable) AddExprs(tbl *sqlparser.AliasedTableExpr, cols sqlparser.Sel
}
// TypeForExpr returns the type of expressions in the query
+// Note that PlanningContext has a TypeForExpr method as well; prefer that one
+// when a PlanningContext is available, since it also widens nullability for
+// columns coming from the outer side of a join.
func (st *SemTable) TypeForExpr(e sqlparser.Expr) (evalengine.Type, bool) {
if typ, found := st.ExprTypes[e]; found {
return typ, true
diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go
index f215fd9df11..1fed0419f6b 100644
--- a/go/vt/vtgate/vschema_manager.go
+++ b/go/vt/vtgate/vschema_manager.go
@@ -212,9 +212,9 @@ func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) {
// Now that we have ensured that all the tables are created, we can start populating the foreign keys
// in the tables.
for tblName, tblInfo := range m {
- rTbl, err := vschema.FindRoutedTable(ksName, tblName, topodatapb.TabletType_PRIMARY)
- if err != nil {
- log.Errorf("error finding routed table %s: %v", tblName, err)
+ rTbl := ks.Tables[tblName]
+ if rTbl == nil {
+ log.Errorf("unable to find table %s in %s", tblName, ksName)
continue
}
for _, fkDef := range tblInfo.ForeignKeys {
@@ -223,7 +223,7 @@ func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) {
continue
}
parentTbl, err := vschema.FindRoutedTable(ksName, fkDef.ReferenceDefinition.ReferencedTable.Name.String(), topodatapb.TabletType_PRIMARY)
- if err != nil {
+ if err != nil || parentTbl == nil {
log.Errorf("error finding parent table %s: %v", fkDef.ReferenceDefinition.ReferencedTable.Name.String(), err)
continue
}
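
updateFromSchema now reads the child table straight out of ks.Tables instead of resolving it through FindRoutedTable: foreign-key bookkeeping concerns the keyspace's own physical tables, and a routed lookup can fail outright when a routing rule carries an error (exactly what TestRoutingRules below provokes). The shape of the guard, reduced to plain Go:

    package main

    import "fmt"

    // Minimal model of the lookup change: the keyspace's table map is the
    // authority for its own tables; a missing entry is logged and skipped
    // rather than aborting the whole vschema update.
    type table struct{ name string }

    func populateFKs(tables map[string]*table, names []string) {
        for _, name := range names {
            tbl := tables[name]
            if tbl == nil {
                fmt.Printf("unable to find table %s\n", name)
                continue // keep processing the remaining tables
            }
            // ... populate foreign keys on tbl ...
            _ = tbl
        }
    }

    func main() {
        populateFKs(map[string]*table{"t1": {name: "t1"}}, []string{"t1", "missing"})
    }
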
diff --git a/go/vt/vtgate/vschema_manager_test.go b/go/vt/vtgate/vschema_manager_test.go
index f810d7c42af..1f6eabc7020 100644
--- a/go/vt/vtgate/vschema_manager_test.go
+++ b/go/vt/vtgate/vschema_manager_test.go
@@ -1,6 +1,7 @@
package vtgate
import (
+ "fmt"
"testing"
"github.com/stretchr/testify/require"
@@ -336,6 +337,48 @@ func TestVSchemaUpdate(t *testing.T) {
}
}
+// TestRoutingRules tests that the vschema manager populates the correct tables even when routing rules are present and carry errors.
+func TestRoutingRules(t *testing.T) {
+ cols1 := []vindexes.Column{{
+ Name: sqlparser.NewIdentifierCI("id"),
+ Type: querypb.Type_INT64,
+ }}
+ // Create a vschema manager whose fake schema returns a table with one column and a primary key.
+ vm := &VSchemaManager{}
+ vm.schema = &fakeSchema{t: map[string]*vindexes.TableInfo{
+ "t1": {
+ Columns: cols1,
+ Indexes: []*sqlparser.IndexDefinition{
+ {
+ Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypePrimary},
+ Columns: []*sqlparser.IndexColumn{
+ {
+ Column: sqlparser.NewIdentifierCI("id"),
+ },
+ },
+ },
+ },
+ },
+ }}
+ // Define a vschema that has a keyspace routing rule.
+ vs := &vindexes.VSchema{
+ Keyspaces: map[string]*vindexes.KeyspaceSchema{
+ "ks": {
+ Tables: map[string]*vindexes.Table{},
+ Keyspace: &vindexes.Keyspace{Name: "ks", Sharded: true},
+ },
+ },
+ RoutingRules: map[string]*vindexes.RoutingRule{
+ "ks.t1": {
+ Error: fmt.Errorf("error in routing rules"),
+ },
+ },
+ }
+ // Ensure that updating the vschema manager from the vschema doesn't cause a panic.
+ vm.updateFromSchema(vs)
+ require.Len(t, vs.Keyspaces["ks"].Tables["t1"].PrimaryKey, 1)
+}
+
func TestRebuildVSchema(t *testing.T) {
cols1 := []vindexes.Column{{
Name: sqlparser.NewIdentifierCI("id"),
diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go
index c054179c20a..768399572db 100644
--- a/go/vt/vttablet/endtoend/misc_test.go
+++ b/go/vt/vttablet/endtoend/misc_test.go
@@ -261,8 +261,10 @@ func TestSidecarTables(t *testing.T) {
}
func TestConsolidation(t *testing.T) {
- defer framework.Server.SetPoolSize(framework.Server.PoolSize())
- framework.Server.SetPoolSize(1)
+ defer framework.Server.SetPoolSize(context.Background(), framework.Server.PoolSize())
+
+ err := framework.Server.SetPoolSize(context.Background(), 1)
+ require.NoError(t, err)
const tag = "Waits/Histograms/Consolidations/Count"
diff --git a/go/vt/vttablet/endtoend/stream_test.go b/go/vt/vttablet/endtoend/stream_test.go
index 05045fd6f7d..a3c73dd8152 100644
--- a/go/vt/vttablet/endtoend/stream_test.go
+++ b/go/vt/vttablet/endtoend/stream_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package endtoend
import (
+ "context"
"errors"
"fmt"
"reflect"
@@ -98,11 +99,13 @@ func TestStreamConsolidation(t *testing.T) {
defaultPoolSize := framework.Server.StreamPoolSize()
- framework.Server.SetStreamPoolSize(4)
+ err = framework.Server.SetStreamPoolSize(context.Background(), 4)
+ require.NoError(t, err)
+
framework.Server.SetStreamConsolidationBlocking(true)
defer func() {
- framework.Server.SetStreamPoolSize(defaultPoolSize)
+ _ = framework.Server.SetStreamPoolSize(context.Background(), defaultPoolSize)
framework.Server.SetStreamConsolidationBlocking(false)
}()
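
Both endtoend tests track an API change: pool resizing now takes a Context and returns an error rather than silently succeeding. A sketch of the save/restore pattern the tests follow (resizer is a hypothetical interface standing in for framework.Server):

    package main

    import (
        "context"
        "fmt"
    )

    // resizer stands in for the tabletserver surface the tests touch:
    // SetPoolSize is now context-aware and fallible.
    type resizer interface {
        PoolSize() int
        SetPoolSize(ctx context.Context, n int) error
    }

    // withPoolSize shrinks the pool for a test and returns a restore func,
    // mirroring the defer-based pattern in TestConsolidation above.
    func withPoolSize(ctx context.Context, r resizer, n int) (restore func(), err error) {
        old := r.PoolSize()
        if err := r.SetPoolSize(ctx, n); err != nil {
            return nil, err
        }
        return func() { _ = r.SetPoolSize(context.Background(), old) }, nil
    }

    type fakePool struct{ size int }

    func (f *fakePool) PoolSize() int { return f.size }
    func (f *fakePool) SetPoolSize(ctx context.Context, n int) error {
        if err := ctx.Err(); err != nil {
            return err // resizing respects cancellation
        }
        f.size = n
        return nil
    }

    func main() {
        p := &fakePool{size: 16}
        restore, err := withPoolSize(context.Background(), p, 1)
        if err != nil {
            panic(err)
        }
        defer restore()
        fmt.Println(p.PoolSize()) // 1
    }
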
diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go
index de641d1d65a..910f52a0333 100644
--- a/go/vt/vttablet/onlineddl/executor.go
+++ b/go/vt/vttablet/onlineddl/executor.go
@@ -995,7 +995,6 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
}
renameQuery := sqlparser.BuildParsedQuery(sqlSwapTables, onlineDDL.Table, sentryTableName, vreplTable, onlineDDL.Table, sentryTableName, vreplTable)
-
waitForRenameProcess := func() error {
// This function waits until it finds the RENAME TABLE... query running in MySQL's PROCESSLIST, or until timeout
// The function assumes that one of the renamed tables is locked, thus causing the RENAME to block. If nothing
@@ -1391,6 +1390,25 @@ func (e *Executor) duplicateCreateTable(ctx context.Context, onlineDDL *schema.O
}
newCreateTable = sqlparser.CloneRefOfCreateTable(originalCreateTable)
newCreateTable.SetTable(newCreateTable.GetTable().Qualifier.CompliantName(), newTableName)
+
+ // If this table has a self-referencing foreign key constraint, ensure the referenced table gets renamed:
+ renameSelfFK := func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ switch node := node.(type) {
+ case *sqlparser.ConstraintDefinition:
+ fk, ok := node.Details.(*sqlparser.ForeignKeyDefinition)
+ if !ok {
+ return true, nil
+ }
+ if referencedTableName := fk.ReferenceDefinition.ReferencedTable.Name.String(); referencedTableName == originalCreateTable.Table.Name.String() {
+ // This is a self-referencing foreign key
+ // We need to rename the referenced table as well
+ fk.ReferenceDefinition.ReferencedTable.Name = sqlparser.NewIdentifierCS(newTableName)
+ }
+ }
+ return true, nil
+ }
+ _ = sqlparser.Walk(renameSelfFK, newCreateTable)
+
// manipulate CreateTable statement: take care of constraints names which have to be
// unique across the schema
constraintMap, err = e.validateAndEditCreateTableStatement(ctx, onlineDDL, newCreateTable)
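The `Walk` callback above only needs to special-case foreign keys whose referenced table equals the table being duplicated. A dependency-free sketch of the same renaming step over simplified stand-in types (hypothetical `createTable`/`foreignKey`, not the real sqlparser nodes):

```go
package main

import "fmt"

// Simplified stand-ins for sqlparser nodes.
type foreignKey struct{ referencedTable string }

type createTable struct {
	name string
	fks  []*foreignKey
}

// renameSelfFKs mirrors the executor's logic: when duplicating oldName as
// newName, any FK that references oldName (a self-reference) must be
// repointed at newName, otherwise the copy would still reference the original.
func renameSelfFKs(ct *createTable, oldName, newName string) {
	ct.name = newName
	for _, fk := range ct.fks {
		if fk.referencedTable == oldName {
			fk.referencedTable = newName
		}
	}
}

func main() {
	ct := &createTable{
		name: "self",
		fks: []*foreignKey{
			{referencedTable: "self"},   // self-referencing: must be renamed
			{referencedTable: "parent"}, // cross-table: left untouched
		},
	}
	renameSelfFKs(ct, "self", "mytable")
	fmt.Println(ct.fks[0].referencedTable, ct.fks[1].referencedTable) // mytable parent
}
```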
diff --git a/go/vt/vttablet/onlineddl/executor_test.go b/go/vt/vttablet/onlineddl/executor_test.go
index c6fc0044c91..bf031f3221a 100644
--- a/go/vt/vttablet/onlineddl/executor_test.go
+++ b/go/vt/vttablet/onlineddl/executor_test.go
@@ -353,6 +353,24 @@ func TestDuplicateCreateTable(t *testing.T) {
expectSQL: "create table mytable (\n\tid int primary key,\n\ti int,\n\tconstraint f_bjj16562shq086ozik3zf6kjg foreign key (i) references parent (id) on delete cascade\n)",
expectMapSize: 1,
},
+ {
+ sql: "create table self (id int primary key, i int, constraint f foreign key (i) references self (id))",
+ newName: "mytable",
+ expectSQL: "create table mytable (\n\tid int primary key,\n\ti int,\n\tconstraint f_8aymb58nzb78l5jhq600veg6y foreign key (i) references mytable (id)\n)",
+ expectMapSize: 1,
+ },
+ {
+ sql: "create table self (id int primary key, i1 int, i2 int, constraint f1 foreign key (i1) references self (id), constraint f1 foreign key (i2) references parent (id))",
+ newName: "mytable",
+ expectSQL: `create table mytable (
+ id int primary key,
+ i1 int,
+ i2 int,
+ constraint f1_1rlsg9yls1t91i35zq5gyeoq7 foreign key (i1) references mytable (id),
+ constraint f1_59t4lvb1ncti6fxy27drad4jp foreign key (i2) references parent (id)
+)`,
+ expectMapSize: 1,
+ },
}
for _, tcase := range tcases {
t.Run(tcase.sql, func(t *testing.T) {
diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go
index a9ce8b841a5..83679413a5c 100644
--- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go
+++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go
@@ -18,6 +18,7 @@ package tabletmanager
import (
"context"
+ "fmt"
"strings"
"google.golang.org/protobuf/encoding/prototext"
@@ -49,6 +50,8 @@ const (
sqlSelectVReplicationWorkflowConfig = "select id, source, cell, tablet_types, state, message from %s.vreplication where workflow = %a"
// Update the configuration values for a workflow's vreplication stream.
sqlUpdateVReplicationWorkflowStreamConfig = "update %s.vreplication set state = %a, source = %a, cell = %a, tablet_types = %a where id = %a"
+ // Check if the workflow is still copying.
+ sqlGetVReplicationCopyStatus = "select distinct vrepl_id from %s.copy_state where vrepl_id = %d"
)
func (tm *TabletManager) CreateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) {
@@ -227,6 +230,18 @@ func (tm *TabletManager) ReadVReplicationWorkflow(ctx context.Context, req *tabl
return resp, nil
}
+func isStreamCopying(tm *TabletManager, id int64) (bool, error) {
+ query := fmt.Sprintf(sqlGetVReplicationCopyStatus, sidecar.GetIdentifier(), id)
+ res, err := tm.VREngine.Exec(query)
+ if err != nil {
+ return false, err
+ }
+ if res != nil && len(res.Rows) > 0 {
+ return true, nil
+ }
+ return false, nil
+}
+
// UpdateVReplicationWorkflow updates the sidecar databases's vreplication
// record(s) for this tablet's vreplication workflow stream(s). If there
// are no streams for the given workflow on the tablet then a nil result
@@ -302,6 +317,17 @@ func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *ta
if !textutil.ValueIsSimulatedNull(req.State) {
state = binlogdatapb.VReplicationWorkflowState_name[int32(req.State)]
}
+ if state == binlogdatapb.VReplicationWorkflowState_Running.String() {
+ // `Workflow Start` sets the new state to Running. However, if the stream is still copying tables,
+ // we should set the state to Copying.
+ isCopying, err := isStreamCopying(tm, id)
+ if err != nil {
+ return nil, err
+ }
+ if isCopying {
+ state = binlogdatapb.VReplicationWorkflowState_Copying.String()
+ }
+ }
bindVars = map[string]*querypb.BindVariable{
"st": sqltypes.StringBindVariable(state),
"sc": sqltypes.StringBindVariable(string(source)),
diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
index 5ef9b4cd8c6..e54de14092d 100644
--- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
+++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
@@ -495,11 +495,18 @@ func TestUpdateVReplicationWorkflow(t *testing.T) {
),
fmt.Sprintf("%d", vreplID),
)
-
+ getCopyStateQuery := fmt.Sprintf(sqlGetVReplicationCopyStatus, sidecar.GetIdentifier(), int64(vreplID))
+ copyStatusFields := sqltypes.MakeTestFields(
+ "id",
+ "int64",
+ )
+ notCopying := sqltypes.MakeTestResult(copyStatusFields)
+ copying := sqltypes.MakeTestResult(copyStatusFields, "1")
tests := []struct {
- name string
- request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest
- query string
+ name string
+ request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest
+ query string
+ isCopying bool
}{
{
name: "update cells",
@@ -579,6 +586,19 @@ func TestUpdateVReplicationWorkflow(t *testing.T) {
query: fmt.Sprintf(`update _vt.vreplication set state = '%s', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`,
binlogdatapb.VReplicationWorkflowState_Stopped.String(), keyspace, shard, cells[0], tabletTypes[0], vreplID),
},
+ {
+ name: "update to running while copying",
+ request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{
+ Workflow: workflow,
+ State: binlogdatapb.VReplicationWorkflowState_Running,
+ Cells: textutil.SimulatedNullStringSlice,
+ TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)},
+ OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt),
+ },
+ isCopying: true,
+ query: fmt.Sprintf(`update _vt.vreplication set state = 'Copying', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`,
+ keyspace, shard, cells[0], tabletTypes[0], vreplID),
+ },
}
for _, tt := range tests {
@@ -597,6 +617,16 @@ func TestUpdateVReplicationWorkflow(t *testing.T) {
// These are the same for each RPC call.
tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil)
tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(selectQuery, selectRes, nil)
+ if tt.request.State == binlogdatapb.VReplicationWorkflowState_Running ||
+ tt.request.State == binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt) {
+ tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil)
+ if tt.isCopying {
+ tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(getCopyStateQuery, copying, nil)
+ } else {
+ tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(getCopyStateQuery, notCopying, nil)
+ }
+ }
tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil)
tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(idQuery, idRes, nil)
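Because the copy-state query is only issued when the requested state is `Running` (or the simulated-null placeholder), the test must register its mock expectation under exactly the same condition, or the strictly ordered fake client would fail on an unconsumed request. A toy ordered mock illustrating that pattern (not the real `vrdbClient` API):

```go
package main

import "fmt"

// orderedMock fails if queries arrive out of the expected order, like the
// fake vreplication DB client used in the test above.
type orderedMock struct {
	expected []string
	pos      int
}

func (m *orderedMock) expect(q string) { m.expected = append(m.expected, q) }

func (m *orderedMock) exec(q string) error {
	if m.pos >= len(m.expected) || m.expected[m.pos] != q {
		return fmt.Errorf("unexpected query %q at position %d", q, m.pos)
	}
	m.pos++
	return nil
}

func main() {
	requestedRunning := true // mirrors tt.request.State == Running in the test
	mock := &orderedMock{}
	mock.expect("select ... from vreplication")
	if requestedRunning {
		// Only expected when the code under test will actually issue it.
		mock.expect("select distinct vrepl_id from copy_state where vrepl_id = 1")
	}
	mock.expect("update vreplication set state = ...")

	fmt.Println(mock.exec("select ... from vreplication"))
	fmt.Println(mock.exec("select distinct vrepl_id from copy_state where vrepl_id = 1"))
	fmt.Println(mock.exec("update vreplication set state = ..."))
}
```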
diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go
index 567745e37b5..14fcc6d0f2e 100644
--- a/go/vt/vttablet/tabletserver/connpool/pool.go
+++ b/go/vt/vttablet/tabletserver/connpool/pool.go
@@ -31,15 +31,9 @@ import (
"vitess.io/vitess/go/vt/dbconnpool"
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
-
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
-// ErrConnPoolClosed is returned when the connection pool is closed.
-var ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_INTERNAL, "internal error: unexpected: conn pool is closed")
-
const (
getWithoutS = "GetWithoutSettings"
getWithS = "GetWithSettings"
diff --git a/go/vt/vttablet/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go
index 28f3e27803a..8cf27cbb327 100644
--- a/go/vt/vttablet/tabletserver/connpool/pool_test.go
+++ b/go/vt/vttablet/tabletserver/connpool/pool_test.go
@@ -67,7 +67,7 @@ func TestConnPoolTimeout(t *testing.T) {
require.NoError(t, err)
defer dbConn.Recycle()
_, err = connPool.Get(context.Background(), nil)
- assert.EqualError(t, err, "resource pool timed out")
+ assert.EqualError(t, err, "connection pool timed out")
}
func TestConnPoolGetEmptyDebugConfig(t *testing.T) {
@@ -126,9 +126,10 @@ func TestConnPoolSetCapacity(t *testing.T) {
defer connPool.Close()
assert.Panics(t, func() {
- connPool.SetCapacity(-10)
+ _ = connPool.SetCapacity(context.Background(), -10)
})
- connPool.SetCapacity(10)
+ err := connPool.SetCapacity(context.Background(), 10)
+ assert.NoError(t, err)
if connPool.Capacity() != 10 {
t.Fatalf("capacity should be 10")
}
diff --git a/go/vt/vttablet/tabletserver/debugenv.go b/go/vt/vttablet/tabletserver/debugenv.go
index c780a28ed90..924d5acbebb 100644
--- a/go/vt/vttablet/tabletserver/debugenv.go
+++ b/go/vt/vttablet/tabletserver/debugenv.go
@@ -17,6 +17,7 @@ limitations under the License.
package tabletserver
import (
+ "context"
"encoding/json"
"fmt"
"html"
@@ -82,6 +83,17 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request)
f(ival)
msg = fmt.Sprintf("Setting %v to: %v", varname, value)
}
+ setIntValCtx := func(f func(context.Context, int) error) {
+ ival, err := strconv.Atoi(value)
+ if err == nil {
+ err = f(r.Context(), ival)
+ if err == nil {
+ msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ return
+ }
+ }
+ msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err)
+ }
setInt64Val := func(f func(int64)) {
ival, err := strconv.ParseInt(value, 10, 64)
if err != nil {
@@ -111,11 +123,11 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request)
}
switch varname {
case "PoolSize":
- setIntVal(tsv.SetPoolSize)
+ setIntValCtx(tsv.SetPoolSize)
case "StreamPoolSize":
- setIntVal(tsv.SetStreamPoolSize)
+ setIntValCtx(tsv.SetStreamPoolSize)
case "TxPoolSize":
- setIntVal(tsv.SetTxPoolSize)
+ setIntValCtx(tsv.SetTxPoolSize)
case "MaxResultSize":
setIntVal(tsv.SetMaxResultSize)
case "WarnResultSize":
diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go
index 14a1899d07b..ad764a970a2 100644
--- a/go/vt/vttablet/tabletserver/health_streamer_test.go
+++ b/go/vt/vttablet/tabletserver/health_streamer_test.go
@@ -249,6 +249,17 @@ func TestReloadSchema(t *testing.T) {
"product|BASE TABLE|1684735966||114688|114688",
"users|BASE TABLE|1684735966||114688|114688",
))
+
+ db.AddQuery(mysql.BaseShowTables,
+ sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "TABLE_NAME | TABLE_TYPE | UNIX_TIMESTAMP(t.create_time) | TABLE_COMMENT",
+ "varchar|varchar|int64|varchar",
+ ),
+ "product|BASE TABLE|1684735966|",
+ "users|BASE TABLE|1684735966|",
+ ))
+
db.AddQueryPattern("SELECT COLUMN_NAME as column_name.*", sqltypes.MakeTestResult(
sqltypes.MakeTestFields(
"column_name",
@@ -293,6 +304,16 @@ func TestReloadSchema(t *testing.T) {
"users|BASE TABLE|1684735967||114688|114688",
))
+ db.AddQuery(mysql.BaseShowTables,
+ sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "TABLE_NAME | TABLE_TYPE | UNIX_TIMESTAMP(t.create_time) | TABLE_COMMENT",
+ "varchar|varchar|int64|varchar",
+ ),
+ "product|BASE TABLE|1684735967|",
+ "users|BASE TABLE|1684735967|",
+ ))
+
var wg sync.WaitGroup
wg.Add(1)
go func() {
@@ -359,6 +380,13 @@ func TestReloadView(t *testing.T) {
"varchar|varchar|int64|varchar|int64|int64",
),
))
+ db.AddQuery(mysql.BaseShowTables,
+ sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "TABLE_NAME | TABLE_TYPE | UNIX_TIMESTAMP(t.create_time) | TABLE_COMMENT",
+ "varchar|varchar|int64|varchar",
+ ),
+ ))
db.AddQueryPattern("SELECT COLUMN_NAME as column_name.*", sqltypes.MakeTestResult(
sqltypes.MakeTestFields(
"column_name",
diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go
index 44b58e3a336..eea3b886fd5 100644
--- a/go/vt/vttablet/tabletserver/query_engine_test.go
+++ b/go/vt/vttablet/tabletserver/query_engine_test.go
@@ -114,15 +114,26 @@ func TestGetPlanPanicDuetoEmptyQuery(t *testing.T) {
}
func addSchemaEngineQueries(db *fakesqldb.DB) {
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table_01", false, ""),
- mysql.BaseShowTablesRow("test_table_02", false, ""),
- mysql.BaseShowTablesRow("test_table_03", false, ""),
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ mysql.BaseShowTablesWithSizesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_03", false, ""),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesWithSizesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
}})
+ db.AddQuery(mysql.BaseShowTables,
+ &sqltypes.Result{
+ Fields: mysql.BaseShowTablesFields,
+ Rows: [][]sqltypes.Value{
+ mysql.BaseShowTablesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesRow("test_table_03", false, ""),
+ mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ },
+ })
db.AddQuery("show status like 'Innodb_rows_read'", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"Variable_name|Value",
"varchar|int64"),
diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go
index 15a9cb0e25d..f072356d097 100644
--- a/go/vt/vttablet/tabletserver/query_executor.go
+++ b/go/vt/vttablet/tabletserver/query_executor.go
@@ -778,15 +778,7 @@ func (qre *QueryExecutor) getConn() (*connpool.PooledConn, error) {
defer func(start time.Time) {
qre.logStats.WaitingForConnection += time.Since(start)
}(time.Now())
- conn, err := qre.tsv.qe.conns.Get(ctx, qre.setting)
-
- switch err {
- case nil:
- return conn, nil
- case connpool.ErrConnPoolClosed:
- return nil, err
- }
- return nil, err
+ return qre.tsv.qe.conns.Get(ctx, qre.setting)
}
func (qre *QueryExecutor) getStreamConn() (*connpool.PooledConn, error) {
@@ -796,15 +788,7 @@ func (qre *QueryExecutor) getStreamConn() (*connpool.PooledConn, error) {
defer func(start time.Time) {
qre.logStats.WaitingForConnection += time.Since(start)
}(time.Now())
- conn, err := qre.tsv.qe.streamConns.Get(ctx, qre.setting)
-
- switch err {
- case nil:
- return conn, nil
- case connpool.ErrConnPoolClosed:
- return nil, err
- }
- return nil, err
+ return qre.tsv.qe.streamConns.Get(ctx, qre.setting)
}
// txFetch fetches from a TxConnection.
diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go
index 3466a55133d..84dd00b8eb1 100644
--- a/go/vt/vttablet/tabletserver/query_executor_test.go
+++ b/go/vt/vttablet/tabletserver/query_executor_test.go
@@ -1589,18 +1589,27 @@ func setUpQueryExecutorTest(t *testing.T) *fakesqldb.DB {
return db
}
-const baseShowTablesPattern = `SELECT t\.table_name.*`
+const baseShowTablesWithSizesPattern = `SELECT t\.table_name.*SUM\(i\.file_size\).*`
func initQueryExecutorTestDB(db *fakesqldb.DB) {
addQueryExecutorSupportedQueries(db)
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table", false, ""),
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ mysql.BaseShowTablesWithSizesRow("test_table", false, ""),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesWithSizesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
},
})
+ db.AddQuery(mysql.BaseShowTables,
+ &sqltypes.Result{
+ Fields: mysql.BaseShowTablesFields,
+ Rows: [][]sqltypes.Value{
+ mysql.BaseShowTablesRow("test_table", false, ""),
+ mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ },
+ })
db.AddQuery("show status like 'Innodb_rows_read'", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"Variable_name|Value",
"varchar|int64"),
diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go
index 1995bd5472d..3e9b5eabd3e 100644
--- a/go/vt/vttablet/tabletserver/schema/engine.go
+++ b/go/vt/vttablet/tabletserver/schema/engine.go
@@ -257,7 +257,7 @@ func (se *Engine) Open() error {
}
se.notifiers = make(map[string]notifier)
- if err := se.reload(ctx, true); err != nil {
+ if err := se.reload(ctx, false); err != nil {
return err
}
if !se.SkipMetaCheck {
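The one-character change above means `Open` now reloads without table sizes (`includeStats=false`), while the periodic background reload keeps gathering them; aggregating `SUM(i.file_size)` over `information_schema` is expensive and isn't needed just to boot the tablet. A sketch of that split; the query strings and helper below are simplified placeholders, not the real constants:

```go
package main

import "fmt"

// Hypothetical stand-ins for mysql.BaseShowTables and the WithSizes variant.
const (
	baseShowTables          = "select table_name, table_type, unix_timestamp(create_time), table_comment from information_schema.tables"
	baseShowTablesWithSizes = baseShowTables + " /* joined with SUM(file_size), SUM(allocated_size) */"
)

// tablesQuery mirrors the engine's choice: Open now reloads with
// includeStats=false and skips the costly size aggregation, while the
// periodic background reload keeps includeStats=true.
func tablesQuery(includeStats bool) string {
	if includeStats {
		return baseShowTablesWithSizes
	}
	return baseShowTables
}

func main() {
	fmt.Println(tablesQuery(false)) // what Open issues after this change
	fmt.Println(tablesQuery(true))  // what periodic reloads keep issuing
}
```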
diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go
index b9492cbd185..018c07c443a 100644
--- a/go/vt/vttablet/tabletserver/schema/engine_test.go
+++ b/go/vt/vttablet/tabletserver/schema/engine_test.go
@@ -49,7 +49,7 @@ import (
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
)
-const baseShowTablesPattern = `SELECT t\.table_name.*`
+const baseShowTablesWithSizesPattern = `SELECT t\.table_name.*SUM\(i\.file_size\).*`
var mustMatch = utils.MustMatchFn(".Mutex")
@@ -57,21 +57,23 @@ func TestOpenAndReload(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
schematest.AddDefaultQueries(db)
- db.AddQueryPattern(baseShowTablesPattern,
- &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
- RowsAffected: 0,
- InsertID: 0,
- Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table_01", false, ""),
- mysql.BaseShowTablesRow("test_table_02", false, ""),
- mysql.BaseShowTablesRow("test_table_03", false, ""),
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
- },
- SessionStateChanges: "",
- StatusFlags: 0,
- })
+
+ db.RejectQueryPattern(baseShowTablesWithSizesPattern, "Opening schema engine should query tables without size information")
+
+ db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesFields,
+ RowsAffected: 0,
+ InsertID: 0,
+ Rows: [][]sqltypes.Value{
+ mysql.BaseShowTablesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesRow("test_table_03", false, ""),
+ mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ },
+ SessionStateChanges: "",
+ StatusFlags: 0,
+ })
// advance to one second after the default 1427325875.
db.AddQuery("select unix_timestamp()", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
@@ -87,8 +89,8 @@ func TestOpenAndReload(t *testing.T) {
want := initialSchema()
mustMatch(t, want, se.GetSchema())
- assert.Equal(t, int64(100), se.tableFileSizeGauge.Counts()["msg"])
- assert.Equal(t, int64(150), se.tableAllocatedSizeGauge.Counts()["msg"])
+ assert.Equal(t, int64(0), se.tableFileSizeGauge.Counts()["msg"])
+ assert.Equal(t, int64(0), se.tableAllocatedSizeGauge.Counts()["msg"])
// Advance time some more.
db.AddQuery("select unix_timestamp()", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
@@ -101,11 +103,11 @@ func TestOpenAndReload(t *testing.T) {
// Modify test_table_03
// Add test_table_04
// Drop msg
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table_01", false, ""),
- mysql.BaseShowTablesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_02", false, ""),
{
sqltypes.MakeTrusted(sqltypes.VarChar, []byte("test_table_03")), // table_name
sqltypes.MakeTrusted(sqltypes.VarChar, []byte("BASE TABLE")), // table_type
@@ -115,10 +117,13 @@ func TestOpenAndReload(t *testing.T) {
sqltypes.MakeTrusted(sqltypes.Int64, []byte("256")), // allocated_size
},
// test_table_04 will be included in spite of its older timestamp because it doesn't exist yet.
- mysql.BaseShowTablesRow("test_table_04", false, ""),
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesWithSizesRow("test_table_04", false, ""),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
},
})
+
+ db.AddRejectedQuery(mysql.BaseShowTables, fmt.Errorf("Reloading schema engine should query tables with size information"))
+
db.MockQueriesForTable("test_table_03", &sqltypes.Result{
Fields: []*querypb.Field{{
Name: "pk1",
@@ -174,6 +179,15 @@ func TestOpenAndReload(t *testing.T) {
assert.EqualValues(t, secondReadRowsValue, se.innoDbReadRowsCounter.Get())
+ want["seq"].FileSize = 100
+ want["seq"].AllocatedSize = 150
+
+ want["test_table_01"].FileSize = 100
+ want["test_table_01"].AllocatedSize = 150
+
+ want["test_table_02"].FileSize = 100
+ want["test_table_02"].AllocatedSize = 150
+
want["test_table_03"] = &Table{
Name: sqlparser.NewIdentifierCS("test_table_03"),
Fields: []*querypb.Field{{
@@ -222,7 +236,17 @@ func TestOpenAndReload(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, want, se.GetSchema())
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
+ Rows: [][]sqltypes.Value{
+ mysql.BaseShowTablesWithSizesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_04", false, ""),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
+ },
+ })
+
+ db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{
Fields: mysql.BaseShowTablesFields,
Rows: [][]sqltypes.Value{
mysql.BaseShowTablesRow("test_table_01", false, ""),
@@ -231,6 +255,7 @@ func TestOpenAndReload(t *testing.T) {
mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
},
})
+
db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{
Fields: mysql.ShowPrimaryFields,
Rows: [][]sqltypes.Value{
@@ -254,21 +279,23 @@ func TestReloadWithSwappedTables(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
schematest.AddDefaultQueries(db)
- db.AddQueryPattern(baseShowTablesPattern,
- &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
- RowsAffected: 0,
- InsertID: 0,
- Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table_01", false, ""),
- mysql.BaseShowTablesRow("test_table_02", false, ""),
- mysql.BaseShowTablesRow("test_table_03", false, ""),
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
- },
- SessionStateChanges: "",
- StatusFlags: 0,
- })
+
+ db.RejectQueryPattern(baseShowTablesWithSizesPattern, "Opening schema engine should query tables without size information")
+
+ db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesFields,
+ RowsAffected: 0,
+ InsertID: 0,
+ Rows: [][]sqltypes.Value{
+ mysql.BaseShowTablesWithSizesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_03", false, ""),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesWithSizesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ },
+ SessionStateChanges: "",
+ StatusFlags: 0,
+ })
firstReadRowsValue := 12
AddFakeInnoDBReadRowsResult(db, firstReadRowsValue)
@@ -285,12 +312,12 @@ func TestReloadWithSwappedTables(t *testing.T) {
"int64"),
"1427325876",
))
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table_01", false, ""),
- mysql.BaseShowTablesRow("test_table_02", false, ""),
- mysql.BaseShowTablesRow("test_table_03", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_03", false, ""),
{
sqltypes.MakeTrusted(sqltypes.VarChar, []byte("test_table_04")),
sqltypes.MakeTrusted(sqltypes.VarChar, []byte("BASE TABLE")),
@@ -299,8 +326,8 @@ func TestReloadWithSwappedTables(t *testing.T) {
sqltypes.MakeTrusted(sqltypes.Int64, []byte("128")), // file_size
sqltypes.MakeTrusted(sqltypes.Int64, []byte("256")), // allocated_size
},
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesWithSizesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
},
})
db.MockQueriesForTable("test_table_04", &sqltypes.Result{
@@ -322,6 +349,22 @@ func TestReloadWithSwappedTables(t *testing.T) {
})
err := se.Reload(context.Background())
require.NoError(t, err)
+
+ want["msg"].FileSize = 100
+ want["msg"].AllocatedSize = 150
+
+ want["seq"].FileSize = 100
+ want["seq"].AllocatedSize = 150
+
+ want["test_table_01"].FileSize = 100
+ want["test_table_01"].AllocatedSize = 150
+
+ want["test_table_02"].FileSize = 100
+ want["test_table_02"].AllocatedSize = 150
+
+ want["test_table_03"].FileSize = 100
+ want["test_table_03"].AllocatedSize = 150
+
want["test_table_04"] = &Table{
Name: sqlparser.NewIdentifierCS("test_table_04"),
Fields: []*querypb.Field{{
@@ -343,11 +386,11 @@ func TestReloadWithSwappedTables(t *testing.T) {
"int64"),
"1427325877",
))
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table_01", false, ""),
- mysql.BaseShowTablesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_02", false, ""),
{
sqltypes.MakeTrusted(sqltypes.VarChar, []byte("test_table_03")),
sqltypes.MakeTrusted(sqltypes.VarChar, []byte("BASE TABLE")),
@@ -356,9 +399,9 @@ func TestReloadWithSwappedTables(t *testing.T) {
sqltypes.MakeTrusted(sqltypes.Int64, []byte("128")), // file_size
sqltypes.MakeTrusted(sqltypes.Int64, []byte("256")), // allocated_size
},
- mysql.BaseShowTablesRow("test_table_04", false, ""),
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ mysql.BaseShowTablesWithSizesRow("test_table_04", false, ""),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesWithSizesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
},
})
db.MockQueriesForTable("test_table_03", &sqltypes.Result{
@@ -421,7 +464,7 @@ func TestOpenFailedDueToExecErr(t *testing.T) {
defer db.Close()
schematest.AddDefaultQueries(db)
want := "injected error"
- db.RejectQueryPattern(baseShowTablesPattern, want)
+ db.AddRejectedQuery(mysql.BaseShowTables, fmt.Errorf(want))
se := newEngine(1*time.Second, 1*time.Second, 0, db)
err := se.Open()
if err == nil || !strings.Contains(err.Error(), want) {
@@ -436,11 +479,11 @@ func TestOpenFailedDueToLoadTableErr(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
schematest.AddDefaultQueries(db)
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
+ db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{
Fields: mysql.BaseShowTablesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table", false, ""),
- mysql.BaseShowTablesRow("test_view", true, "VIEW"),
+ mysql.BaseShowTablesWithSizesRow("test_table", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_view", true, "VIEW"),
},
})
// this will cause NewTable error, as it expects zero rows.
@@ -471,11 +514,11 @@ func TestOpenNoErrorDueToInvalidViews(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
schematest.AddDefaultQueries(db)
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
+ db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{
Fields: mysql.BaseShowTablesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("foo_view", true, "VIEW"),
- mysql.BaseShowTablesRow("bar_view", true, "VIEW"),
+ mysql.BaseShowTablesWithSizesRow("foo_view", true, "VIEW"),
+ mysql.BaseShowTablesWithSizesRow("bar_view", true, "VIEW"),
},
})
@@ -529,17 +572,17 @@ func TestSchemaEngineCloseTickRace(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
schematest.AddDefaultQueries(db)
- db.AddQueryPattern(baseShowTablesPattern,
+ db.AddQuery(mysql.BaseShowTables,
&sqltypes.Result{
Fields: mysql.BaseShowTablesFields,
RowsAffected: 0,
InsertID: 0,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table_01", false, ""),
- mysql.BaseShowTablesRow("test_table_02", false, ""),
- mysql.BaseShowTablesRow("test_table_03", false, ""),
- mysql.BaseShowTablesRow("seq", false, "vitess_sequence"),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ mysql.BaseShowTablesWithSizesRow("test_table_01", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_02", false, ""),
+ mysql.BaseShowTablesWithSizesRow("test_table_03", false, ""),
+ mysql.BaseShowTablesWithSizesRow("seq", false, "vitess_sequence"),
+ mysql.BaseShowTablesWithSizesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
},
SessionStateChanges: "",
StatusFlags: 0,
@@ -604,8 +647,8 @@ func initialSchema() map[string]*Table {
}},
PKColumns: []int{0},
CreateTime: 1427325875,
- FileSize: 0x64,
- AllocatedSize: 0x96,
+ FileSize: 0,
+ AllocatedSize: 0,
},
"test_table_02": {
Name: sqlparser.NewIdentifierCS("test_table_02"),
@@ -615,8 +658,8 @@ func initialSchema() map[string]*Table {
}},
PKColumns: []int{0},
CreateTime: 1427325875,
- FileSize: 0x64,
- AllocatedSize: 0x96,
+ FileSize: 0,
+ AllocatedSize: 0,
},
"test_table_03": {
Name: sqlparser.NewIdentifierCS("test_table_03"),
@@ -626,8 +669,8 @@ func initialSchema() map[string]*Table {
}},
PKColumns: []int{0},
CreateTime: 1427325875,
- FileSize: 0x64,
- AllocatedSize: 0x96,
+ FileSize: 0,
+ AllocatedSize: 0,
},
"seq": {
Name: sqlparser.NewIdentifierCS("seq"),
@@ -647,8 +690,8 @@ func initialSchema() map[string]*Table {
}},
PKColumns: []int{0},
CreateTime: 1427325875,
- FileSize: 0x64,
- AllocatedSize: 0x96,
+ FileSize: 0,
+ AllocatedSize: 0,
SequenceInfo: &SequenceInfo{},
},
"msg": {
@@ -675,8 +718,8 @@ func initialSchema() map[string]*Table {
}},
PKColumns: []int{0},
CreateTime: 1427325875,
- FileSize: 0x64,
- AllocatedSize: 0x96,
+ FileSize: 0,
+ AllocatedSize: 0,
MessageInfo: &MessageInfo{
Fields: []*querypb.Field{{
Name: "id",
diff --git a/go/vt/vttablet/tabletserver/schema/main_test.go b/go/vt/vttablet/tabletserver/schema/main_test.go
index 0948c1313fc..7eaca5f18e5 100644
--- a/go/vt/vttablet/tabletserver/schema/main_test.go
+++ b/go/vt/vttablet/tabletserver/schema/main_test.go
@@ -34,7 +34,8 @@ func getTestSchemaEngine(t *testing.T, schemaMaxAgeSeconds int64) (*Engine, *fak
"int64"),
"1427325876",
))
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{})
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{})
+ db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{})
db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{})
AddFakeInnoDBReadRowsResult(db, 1)
se := newEngine(10*time.Second, 10*time.Second, schemaMaxAgeSeconds, db)
diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go
index 233f8951227..d01e2cc5055 100644
--- a/go/vt/vttablet/tabletserver/tabletenv/config.go
+++ b/go/vt/vttablet/tabletserver/tabletenv/config.go
@@ -414,8 +414,6 @@ func (cfg *TabletConfig) UnmarshalJSON(data []byte) (err error) {
if err != nil {
return err
}
- } else {
- cfg.SchemaReloadInterval = 0
}
if tmp.SignalSchemaChangeReloadInterval != "" {
@@ -432,8 +430,6 @@ func (cfg *TabletConfig) UnmarshalJSON(data []byte) (err error) {
if err != nil {
return err
}
- } else {
- cfg.SchemaChangeReloadTimeout = 0
}
return nil
diff --git a/go/vt/vttablet/tabletserver/tabletenv/config_test.go b/go/vt/vttablet/tabletserver/tabletenv/config_test.go
index 3121c14bbcf..6311384162d 100644
--- a/go/vt/vttablet/tabletserver/tabletenv/config_test.go
+++ b/go/vt/vttablet/tabletserver/tabletenv/config_test.go
@@ -56,6 +56,8 @@ func TestConfigParse(t *testing.T) {
MaxInnoDBTrxHistLen: 1000,
MaxMySQLReplLagSecs: 400,
},
+ SchemaChangeReloadTimeout: 30 * time.Second,
+ SchemaReloadInterval: 30 * time.Minute,
}
gotBytes, err := yaml2.Marshal(&cfg)
@@ -91,6 +93,8 @@ replicationTracker: {}
rowStreamer:
maxInnoDBTrxHistLen: 1000
maxMySQLReplLagSecs: 400
+schemaChangeReloadTimeout: 30s
+schemaReloadIntervalSeconds: 30m0s
txPool: {}
`
assert.Equal(t, wantBytes, string(gotBytes))
diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go
index 6ecc46c68ab..6ecb5eff44d 100644
--- a/go/vt/vttablet/tabletserver/tabletserver.go
+++ b/go/vt/vttablet/tabletserver/tabletserver.go
@@ -2001,11 +2001,11 @@ func (tsv *TabletServer) EnableHistorian(enabled bool) {
}
// SetPoolSize changes the pool size to the specified value.
-func (tsv *TabletServer) SetPoolSize(val int) {
+func (tsv *TabletServer) SetPoolSize(ctx context.Context, val int) error {
if val <= 0 {
- return
+ return nil
}
- tsv.qe.conns.SetCapacity(int64(val))
+ return tsv.qe.conns.SetCapacity(ctx, int64(val))
}
// PoolSize returns the pool size.
@@ -2014,8 +2014,8 @@ func (tsv *TabletServer) PoolSize() int {
}
// SetStreamPoolSize changes the pool size to the specified value.
-func (tsv *TabletServer) SetStreamPoolSize(val int) {
- tsv.qe.streamConns.SetCapacity(int64(val))
+func (tsv *TabletServer) SetStreamPoolSize(ctx context.Context, val int) error {
+ return tsv.qe.streamConns.SetCapacity(ctx, int64(val))
}
// SetStreamConsolidationBlocking sets whether the stream consolidator should wait for slow clients
@@ -2029,8 +2029,8 @@ func (tsv *TabletServer) StreamPoolSize() int {
}
// SetTxPoolSize changes the tx pool size to the specified value.
-func (tsv *TabletServer) SetTxPoolSize(val int) {
- tsv.te.txPool.scp.conns.SetCapacity(int64(val))
+func (tsv *TabletServer) SetTxPoolSize(ctx context.Context, val int) error {
+ return tsv.te.txPool.scp.conns.SetCapacity(ctx, int64(val))
}
// TxPoolSize returns the tx pool size.
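Every caller of the pool-size setters now has to pass a context and handle the returned error, since resizing can wait on in-flight connections. A usage sketch under a stand-in `poolResizer` interface (hypothetical; the real methods live on `TabletServer`):

```go
package main

import (
	"context"
	"fmt"
	"log"
)

// poolResizer captures the new setter shape on TabletServer.
type poolResizer interface {
	SetPoolSize(ctx context.Context, val int) error
}

type fakeServer struct{ size int }

func (f *fakeServer) SetPoolSize(ctx context.Context, val int) error {
	if val <= 0 {
		return nil // mirrors TabletServer: non-positive values are ignored
	}
	f.size = val
	return nil
}

func main() {
	var tsv poolResizer = &fakeServer{}
	// Resizing may block on the context, so errors must be checked now.
	if err := tsv.SetPoolSize(context.Background(), 10); err != nil {
		log.Fatalf("resize failed: %v", err)
	}
	fmt.Println("pool resized")
}
```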
diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go
index 97777c0245f..11bfd531597 100644
--- a/go/vt/vttablet/tabletserver/tabletserver_test.go
+++ b/go/vt/vttablet/tabletserver/tabletserver_test.go
@@ -2100,7 +2100,9 @@ func TestConfigChanges(t *testing.T) {
newSize := 10
newDuration := time.Duration(10 * time.Millisecond)
- tsv.SetPoolSize(newSize)
+ err := tsv.SetPoolSize(context.Background(), newSize)
+ require.NoError(t, err)
+
if val := tsv.PoolSize(); val != newSize {
t.Errorf("PoolSize: %d, want %d", val, newSize)
}
@@ -2108,7 +2110,9 @@ func TestConfigChanges(t *testing.T) {
t.Errorf("tsv.qe.connPool.Capacity: %d, want %d", val, newSize)
}
- tsv.SetStreamPoolSize(newSize)
+ err = tsv.SetStreamPoolSize(context.Background(), newSize)
+ require.NoError(t, err)
+
if val := tsv.StreamPoolSize(); val != newSize {
t.Errorf("StreamPoolSize: %d, want %d", val, newSize)
}
@@ -2116,7 +2120,9 @@ func TestConfigChanges(t *testing.T) {
t.Errorf("tsv.qe.streamConnPool.Capacity: %d, want %d", val, newSize)
}
- tsv.SetTxPoolSize(newSize)
+ err = tsv.SetTxPoolSize(context.Background(), newSize)
+ require.NoError(t, err)
+
if val := tsv.TxPoolSize(); val != newSize {
t.Errorf("TxPoolSize: %d, want %d", val, newSize)
}
@@ -2579,13 +2585,21 @@ func setupTabletServerTestCustom(t testing.TB, ctx context.Context, cfg *tablete
func setupFakeDB(t testing.TB) *fakesqldb.DB {
db := fakesqldb.New(t)
addTabletServerSupportedQueries(db)
- db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
- Fields: mysql.BaseShowTablesFields,
+ db.AddQueryPattern(baseShowTablesWithSizesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesWithSizesFields,
Rows: [][]sqltypes.Value{
- mysql.BaseShowTablesRow("test_table", false, ""),
- mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ mysql.BaseShowTablesWithSizesRow("test_table", false, ""),
+ mysql.BaseShowTablesWithSizesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
},
})
+ db.AddQuery(mysql.BaseShowTables,
+ &sqltypes.Result{
+ Fields: mysql.BaseShowTablesFields,
+ Rows: [][]sqltypes.Value{
+ mysql.BaseShowTablesRow("test_table", false, ""),
+ mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"),
+ },
+ })
db.AddQuery("show status like 'Innodb_rows_read'", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"Variable_name|Value",
"varchar|int64"),
diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go
index 37500ada79a..aa2d5b69e89 100644
--- a/go/vt/vttablet/tabletserver/tx_pool_test.go
+++ b/go/vt/vttablet/tabletserver/tx_pool_test.go
@@ -216,7 +216,8 @@ func primeTxPoolWithConnection(t *testing.T, ctx context.Context) (*fakesqldb.DB
db := fakesqldb.New(t)
txPool, _ := newTxPool()
// Set the capacity to 1 to ensure that the db connection is reused.
- txPool.scp.conns.SetCapacity(1)
+ err := txPool.scp.conns.SetCapacity(context.Background(), 1)
+ require.NoError(t, err)
params := dbconfigs.New(db.ConnParams())
txPool.Open(params, params, params)
diff --git a/go/vt/vttest/environment.go b/go/vt/vttest/environment.go
index 7f3ea88437a..36cf0a036f8 100644
--- a/go/vt/vttest/environment.go
+++ b/go/vt/vttest/environment.go
@@ -19,8 +19,10 @@ package vttest
import (
"fmt"
"math/rand"
+ "net"
"os"
"path"
+ "strconv"
"strings"
"vitess.io/vitess/go/vt/proto/vttest"
@@ -230,9 +232,26 @@ func tmpdir(dataroot string) (dir string, err error) {
return
}
+// randomPort returns a random port that is available for a TCP connection.
+// After generating a random port, we try to listen on it and on the next 5 ports.
+// If any of those listens fail, we try a different base port.
func randomPort() int {
- v := rand.Int31n(20000)
- return int(v + 10000)
+ for {
+ port := int(rand.Int31n(20000) + 10000)
+ portInUse := false
+ for i := 0; i < 6; i++ {
+ ln, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(port+i)))
+ if err != nil {
+ portInUse = true
+ break
+ }
+ ln.Close()
+ }
+ if portInUse {
+ continue
+ }
+ return port
+ }
}
// NewLocalTestEnv returns an instance of the default test environment used
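The loop above probes the candidate port plus the next five, since the test environment hands out consecutive ports from the base. Note the check is inherently racy: another process can still claim the port between `ln.Close()` and actual use. A common alternative, sketched below for contrast (standard library only), is to ask the kernel for a free port by listening on port 0; it avoids the guessing loop but yields one port at a time rather than a free contiguous block:

```go
package main

import (
	"fmt"
	"net"
)

// freePort asks the OS for an ephemeral port by listening on port 0.
func freePort() (int, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer ln.Close()
	return ln.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		panic(err)
	}
	fmt.Println("free port:", port)
}
```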
diff --git a/java/client/pom.xml b/java/client/pom.xml
index 0492dbad174..0b40e76be7b 100644
--- a/java/client/pom.xml
+++ b/java/client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.4
+ 19.0.5
vitess-client
diff --git a/java/example/pom.xml b/java/example/pom.xml
index ab2061462ec..efb327f363f 100644
--- a/java/example/pom.xml
+++ b/java/example/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.4
+ 19.0.5
vitess-example
diff --git a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml
index 457ae4ca550..5114fd48def 100644
--- a/java/grpc-client/pom.xml
+++ b/java/grpc-client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.4
+ 19.0.5
vitess-grpc-client
diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml
index edb7775d60c..eee476ea4df 100644
--- a/java/jdbc/pom.xml
+++ b/java/jdbc/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.4
+ 19.0.5
vitess-jdbc
diff --git a/java/pom.xml b/java/pom.xml
index dfeb0625d43..9f1d57cfede 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -11,7 +11,7 @@
io.vitess
vitess-parent
- 19.0.4
+ 19.0.5
pom
Vitess Java Client libraries [Parent]
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index 4c93d249703..bc36ad3753e 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -60,6 +60,7 @@ const (
dockerFileTemplate = "templates/dockerfile.tpl"
clusterTestSelfHostedTemplate = "templates/cluster_endtoend_test_self_hosted.tpl"
clusterTestDockerTemplate = "templates/cluster_endtoend_test_docker.tpl"
+ clusterVitessTesterTemplate = "templates/cluster_vitess_tester.tpl"
)
var (
@@ -124,6 +125,10 @@ var (
"vttablet_prscomplex",
}
+ vitessTesterMap = map[string]string{
+ "vtgate": "./go/test/endtoend/vtgate/vitess_tester",
+ }
+
clusterSelfHostedList = []string{}
clusterDockerList = []string{}
clustersRequiringXtraBackup = []string{
@@ -168,6 +173,12 @@ type selfHostedTest struct {
MakeTools, InstallXtraBackup, Docker bool
}
+type vitessTesterTest struct {
+ FileName string
+ Name string
+ Path string
+}
+
// clusterMySQLVersions return list of mysql versions (one or more) that this cluster needs to test against
func clusterMySQLVersions(clusterName string) mysqlVersions {
switch {
@@ -213,6 +224,7 @@ func mergeBlankLines(buf *bytes.Buffer) string {
func main() {
generateUnitTestWorkflows()
+ generateVitessTesterWorkflows(vitessTesterMap, clusterVitessTesterTemplate)
generateClusterWorkflows(clusterList, clusterTestTemplate)
generateClusterWorkflows(clusterDockerList, clusterTestDockerTemplate)
@@ -329,6 +341,23 @@ func generateSelfHostedClusterWorkflows() error {
return nil
}
+func generateVitessTesterWorkflows(mp map[string]string, tpl string) {
+ for test, testPath := range mp {
+ tt := &vitessTesterTest{
+ Name: fmt.Sprintf("Vitess Tester (%v)", test),
+ Path: testPath,
+ }
+
+ templateFileName := tpl
+ tt.FileName = fmt.Sprintf("vitess_tester_%s.yml", test)
+ workflowPath := fmt.Sprintf("%s/%s", workflowConfigDir, tt.FileName)
+ err := writeFileFromTemplate(templateFileName, workflowPath, tt)
+ if err != nil {
+ log.Print(err)
+ }
+ }
+}
+
func generateClusterWorkflows(list []string, tpl string) {
clusters := canonnizeList(list)
for _, cluster := range clusters {
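`generateVitessTesterWorkflows` follows the generator's existing pattern: one struct per workflow, rendered through a template file into `.github/workflows/`. A self-contained sketch of the same mechanism using `text/template` with an inline template string (the real generator renders from `.tpl` files on disk and writes the output file):

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

type vitessTesterTest struct {
	FileName string
	Name     string
	Path     string
}

const tpl = "name: {{.Name}}\n# generated into .github/workflows/{{.FileName}}\n# tests under {{.Path}}\n"

func main() {
	tests := map[string]string{"vtgate": "./go/test/endtoend/vtgate/vitess_tester"}
	t := template.Must(template.New("wf").Parse(tpl))
	for name, path := range tests {
		tt := &vitessTesterTest{
			Name:     fmt.Sprintf("Vitess Tester (%v)", name),
			Path:     path,
			FileName: fmt.Sprintf("vitess_tester_%s.yml", name),
		}
		// Render to stdout here; the real generator writes the workflow file.
		if err := t.Execute(os.Stdout, tt); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}
```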
diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl
index f74bdd425f6..6ac17b2c395 100644
--- a/test/templates/cluster_endtoend_test_mysql57.tpl
+++ b/test/templates/cluster_endtoend_test_mysql57.tpl
@@ -138,7 +138,7 @@ jobs:
wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb"
sudo apt-get install -y gnupg2
sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb"
- sudo percona-release enable-only pxb-24
+ sudo percona-release enable-only tools
sudo apt-get update
if [[ -n $XTRABACKUP_VERSION ]]; then
debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb"
diff --git a/test/templates/cluster_vitess_tester.tpl b/test/templates/cluster_vitess_tester.tpl
new file mode 100644
index 00000000000..2b6ecddb730
--- /dev/null
+++ b/test/templates/cluster_vitess_tester.tpl
@@ -0,0 +1,176 @@
+name: {{.Name}}
+on: [push, pull_request]
+concurrency:
+ group: format('{0}-{1}', ${{"{{"}} github.ref {{"}}"}}, '{{.Name}}')
+ cancel-in-progress: true
+
+permissions: read-all
+
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{`{{ github.event.pull_request.head.sha }}`}}"
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: "${{ secrets.GH_ACCESS_TOKEN }}"
+
+jobs:
+ build:
+ name: Run endtoend tests on {{.Name}}
+ runs-on:
+ group: vitess-ubuntu20
+
+ steps:
+ - name: Skip CI
+ run: |
+ if [[ "{{"${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}"}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
+ skip='true'
+ fi
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ PR_DATA=$(curl -s\
+ -H "{{"Authorization: token ${{ secrets.GITHUB_TOKEN }}"}}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "{{"https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}"}}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v4
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: dorny/paths-filter@v3.0.1
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'go/vt/sidecardb/**/*.sql'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/**'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/{{.FileName}}'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v5
+ with:
+ go-version: 1.22.5
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v5
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+ # Increase the limit on asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get -qq update
+ # Install everything else we need, and configure
+ sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
+ sudo service mysql stop
+ sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ # install vitess tester
+ go install github.com/vitessio/vitess-tester@eb953122baba163ed8ccaa6642458ee984f5d7e4
+
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ timeout-minutes: 45
+ run: |
+ # We set VTDATAROOT to the /tmp folder to shorten the path to the mysql.sock file,
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+ make build
+
+ set -exo pipefail
+
+ i=1
+ for dir in {{.Path}}/*/; do
+ # We go over all the directories in the given path.
+ # If there is a vschema file there, we use it; otherwise we let vitess-tester autogenerate one.
+ if [ -f $dir/vschema.json ]; then
+ vitess-tester --sharded --xunit --test-dir $dir --vschema "$dir"vschema.json
+ else
+ vitess-tester --sharded --xunit --test-dir $dir
+ fi
+ # Number the reports by changing their file names.
+ mv report.xml report"$i".xml
+ i=$((i+1))
+ done
+
+ - name: Print test output and Record test result in launchable if PR is not a draft
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ run: |
+ if [[ "{{"${{steps.skip-workflow.outputs.is_draft}}"}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
+
+ # print test output
+ cat report*.xml
+
+ - name: Test Summary
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ uses: test-summary/action@v2
+ with:
+ paths: "report*.xml"
+ show: "fail, skip"
diff --git a/web/vtadmin/build.sh b/web/vtadmin/build.sh
index a85e6a6fa44..8025380ea68 100755
--- a/web/vtadmin/build.sh
+++ b/web/vtadmin/build.sh
@@ -19,7 +19,9 @@ function output() {
}
script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
-source "${script_dir}/../../build.env"
+pushd "${VTROOT}"
+source "./build.env"
+popd
web_dir="${script_dir}"
vtadmin_api_port=14200
@@ -30,6 +32,8 @@ then
output "\n\033[1;32mhostname was empty, set it to \"${hostname}\"\033[0m"
fi
+case_insensitive_hostname=$(echo "$hostname" | tr '[:upper:]' '[:lower:]')
+
# Download nvm and node
if [[ -z ${NVM_DIR} ]]; then
export NVM_DIR="$HOME/.nvm"
@@ -56,9 +60,9 @@ npm --prefix "$web_dir" --silent install
export PATH=$PATH:$web_dir/node_modules/.bin/
-vite_vtadmin_api_address="http://${hostname}:${vtadmin_api_port}"
+vite_vtadmin_api_address="http://${case_insensitive_hostname}:${vtadmin_api_port}"
output "\n\033[1;32mSetting VITE_VTADMIN_API_ADDRESS to \"${vite_vtadmin_api_address}\"\033[0m"
-VITE_VTADMIN_API_ADDRESS="http://${hostname}:${vtadmin_api_port}" \
+VITE_VTADMIN_API_ADDRESS="http://${case_insensitive_hostname}:${vtadmin_api_port}" \
VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \
npm run --prefix "$web_dir" build
diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json
index 6caa41e6011..7f5755e5cc4 100644
--- a/web/vtadmin/package-lock.json
+++ b/web/vtadmin/package-lock.json
@@ -5655,11 +5655,12 @@
}
},
"node_modules/braces": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
- "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "license": "MIT",
"dependencies": {
- "fill-range": "^7.0.1"
+ "fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
@@ -8094,9 +8095,10 @@
}
},
"node_modules/fill-range": {
- "version": "7.0.1",
- "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
- "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "license": "MIT",
"dependencies": {
"to-regex-range": "^5.0.1"
},
@@ -9042,6 +9044,7 @@
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "license": "MIT",
"engines": {
"node": ">=0.12.0"
}
@@ -16612,6 +16615,7 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "license": "MIT",
"dependencies": {
"is-number": "^7.0.0"
},
@@ -21461,11 +21465,11 @@
}
},
"braces": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
- "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"requires": {
- "fill-range": "^7.0.1"
+ "fill-range": "^7.1.1"
}
},
"broadcast-channel": {
@@ -23229,9 +23233,9 @@
}
},
"fill-range": {
- "version": "7.0.1",
- "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
- "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"requires": {
"to-regex-range": "^5.0.1"
}