diff --git a/.bazelversion b/.bazelversion
index 815da58b7a9ed..bfe365e7779da 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-7.4.1
+7.7.1
\ No newline at end of file
diff --git a/DEPS.bzl b/DEPS.bzl
index 7b3a30bcb0507..664e35f3b2d36 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -5542,13 +5542,13 @@ def go_deps():
name = "com_github_petermattis_goid",
build_file_proto_mode = "disable_global",
importpath = "github.com/petermattis/goid",
- sha256 = "3f47ab8e5713c36ec5b4295956a5ef012a192bc19198ae1b6591408c061e97ab",
- strip_prefix = "github.com/petermattis/goid@v0.0.0-20240813172612-4fcff4a6cae7",
+ sha256 = "9bf0da79dd558ac6695d99ae547993fb34502eb255ce7cc6494fb03131b959c2",
+ strip_prefix = "github.com/petermattis/goid@v0.0.0-20250813065127-a731cc31b4fe",
urls = [
- "http://bazel-cache.pingcap.net:8080/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20240813172612-4fcff4a6cae7.zip",
- "http://ats.apps.svc/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20240813172612-4fcff4a6cae7.zip",
- "https://cache.hawkingrei.com/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20240813172612-4fcff4a6cae7.zip",
- "https://storage.googleapis.com/pingcapmirror/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20240813172612-4fcff4a6cae7.zip",
+ "http://bazel-cache.pingcap.net:8080/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20250813065127-a731cc31b4fe.zip",
+ "http://ats.apps.svc/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20250813065127-a731cc31b4fe.zip",
+ "https://cache.hawkingrei.com/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20250813065127-a731cc31b4fe.zip",
+ "https://storage.googleapis.com/pingcapmirror/gomod/github.com/petermattis/goid/com_github_petermattis_goid-v0.0.0-20250813065127-a731cc31b4fe.zip",
],
)
go_repository(
@@ -6218,13 +6218,13 @@ def go_deps():
name = "com_github_sasha_s_go_deadlock",
build_file_proto_mode = "disable_global",
importpath = "github.com/sasha-s/go-deadlock",
- sha256 = "b927f67dd9a6dc183bac7249c019775e689aee67dc52bfa53354137139d722a1",
- strip_prefix = "github.com/sasha-s/go-deadlock@v0.3.5",
+ sha256 = "3fc61a5bf78d5d069069dc3d531dfff4ac90406bdf28943d11cb5a3faaf77fc8",
+ strip_prefix = "github.com/sasha-s/go-deadlock@v0.3.6",
urls = [
- "http://bazel-cache.pingcap.net:8080/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.5.zip",
- "http://ats.apps.svc/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.5.zip",
- "https://cache.hawkingrei.com/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.5.zip",
- "https://storage.googleapis.com/pingcapmirror/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.5.zip",
+ "http://bazel-cache.pingcap.net:8080/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.6.zip",
+ "http://ats.apps.svc/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.6.zip",
+ "https://cache.hawkingrei.com/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.6.zip",
+ "https://storage.googleapis.com/pingcapmirror/gomod/github.com/sasha-s/go-deadlock/com_github_sasha_s_go_deadlock-v0.3.6.zip",
],
)
go_repository(
diff --git a/Makefile b/Makefile
index e904eec07efb3..d082b0770dd0b 100644
--- a/Makefile
+++ b/Makefile
@@ -47,7 +47,7 @@ check-setup:tools/bin/revive
precheck: fmt bazel_prepare
.PHONY: check
-check: check-bazel-prepare parser_yacc check-parallel lint tidy testSuite errdoc license
+check: check-bazel-prepare parser_yacc check-parallel lint tidy testSuite errdoc license bazel_check_abi
.PHONY: fmt
fmt:
@@ -831,3 +831,8 @@ bazel_sync:
.PHONY: bazel_mirror_upload
bazel_mirror_upload:
bazel $(BAZEL_GLOBAL_CONFIG) run $(BAZEL_CMD_CONFIG) //cmd/mirror -- --mirror --upload
+
+.PHONY: bazel_check_abi
+bazel_check_abi:
+ @echo "check ABI compatibility"
+ ./tools/check/bazel-check-abi.sh
diff --git a/Makefile.common b/Makefile.common
index 8a59f8a9fe96f..a5041b692e793 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -26,6 +26,7 @@ FAIL_ON_STDOUT := awk '{ print } END { if (NR > 0) { exit 1 } }'
CURDIR := $(shell pwd)
path_to_add := $(addsuffix /bin,$(subst :,/bin:,$(GOPATH))):$(PWD)/tools/bin
export PATH := $(path_to_add):$(PATH)
+export GOTOOLCHAIN := go1.25.5
GO := GO111MODULE=on go
BUILD_FLAG := -tags codes
diff --git a/WORKSPACE b/WORKSPACE
index b2e63a9aa51ff..64e4b9d8e4642 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -1,15 +1,17 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-# Required by toolchains_protoc.
http_archive(
name = "platforms",
- sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
- "https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
+ "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/1.0.0/platforms-1.0.0.tar.gz",
+ "https://github.com/bazelbuild/platforms/releases/download/1.0.0/platforms-1.0.0.tar.gz",
],
+ sha256 = "3384eb1c30762704fbe38e440204e114154086c8fc8a8c2e3e28441028c019a8",
)
+# Set up the Starlark host platform repo provided by @platforms (per the platforms 1.0.0 release notes).
+load("@platforms//host:extension.bzl", "host_platform_repo")
+host_platform_repo(name = "host_platform")
+
http_archive(
name = "bazel_features",
sha256 = "ba1282c1aa1d1fffdcf994ab32131d7c7551a9bc960fbf05f42d55a1b930cbfb",
@@ -33,42 +35,61 @@ http_archive(
)
load("@bazel_skylib//lib:versions.bzl", "versions")
+
versions.check(minimum_bazel_version = "6.0.0")
http_archive(
name = "io_bazel_rules_go",
- sha256 = "f4a9314518ca6acfa16cc4ab43b0b8ce1e4ea64b81c38d8a3772883f153346b8",
+ sha256 = "68af54cb97fbdee5e5e8fe8d210d15a518f9d62abfd71620c3eaff3b26a5ff86",
urls = [
- "http://bazel-cache.pingcap.net:8080/bazelbuild/rules_go/releases/download/v0.50.1/rules_go-v0.50.1.zip",
- "http://ats.apps.svc/bazelbuild/rules_go/releases/download/v0.50.1/rules_go-v0.50.1.zip",
- "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.50.1/rules_go-v0.50.1.zip",
- "https://github.com/bazelbuild/rules_go/releases/download/v0.50.1/rules_go-v0.50.1.zip",
+ "http://bazel-cache.pingcap.net:8080/bazel-contrib/rules_go/releases/download/v0.59.0/rules_go-v0.59.0.zip",
+ "http://ats.apps.svc/bazel-contrib/rules_go/releases/download/v0.59.0/rules_go-v0.59.0.zip",
+ "https://cache.hawkingrei.com/bazel-contrib/rules_go/releases/download/v0.59.0/rules_go-v0.59.0.zip",
+ "https://mirror.bazel.build/github.com/bazel-contrib/rules_go/releases/download/v0.59.0/rules_go-v0.59.0.zip",
+ "https://github.com/bazel-contrib/rules_go/releases/download/v0.59.0/rules_go-v0.59.0.zip",
],
)
http_archive(
name = "bazel_gazelle",
- sha256 = "8ad77552825b078a10ad960bec6ef77d2ff8ec70faef2fd038db713f410f5d87",
+ sha256 = "675114d8b433d0a9f54d81171833be96ebc4113115664b791e6f204d58e93446",
urls = [
- "http://bazel-cache.pingcap.net:8080/bazelbuild/bazel-gazelle/releases/download/v0.38.0/bazel-gazelle-v0.38.0.tar.gz",
- "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.38.0/bazel-gazelle-v0.38.0.tar.gz",
- "http://ats.apps.svc/bazelbuild/bazel-gazelle/releases/download/v0.38.0/bazel-gazelle-v0.38.0.tar.gz",
+ "http://bazel-cache.pingcap.net:8080/bazel-contrib/bazel-gazelle/releases/download/v0.47.0/bazel-gazelle-v0.47.0.tar.gz",
+ "https://github.com/bazel-contrib/bazel-gazelle/releases/download/v0.47.0/bazel-gazelle-v0.47.0.tar.gz",
+ "http://ats.apps.svc/bazel-contrib/bazel-gazelle/releases/download/v0.47.0/bazel-gazelle-v0.47.0.tar.gz",
+ "https://cache.hawkingrei.com/bazel-contrib/bazel-gazelle/releases/download/v0.47.0/bazel-gazelle-v0.47.0.tar.gz",
],
)
http_archive(
name = "rules_cc",
+ sha256 = "d62624b45e0912713dcd3b8e30ba6ae55418ed6bf99e6d135cd61b8addae312b",
+ strip_prefix = "rules_cc-0.1.2",
urls = [
- "http://bazel-cache.pingcap.net:8080/bazelbuild/rules_cc/releases/download/0.0.6/rules_cc-0.0.6.tar.gz",
- "https://github.com/bazelbuild/rules_cc/releases/download/0.0.6/rules_cc-0.0.6.tar.gz",
- "http://ats.apps.svc/bazelbuild/rules_cc/releases/download/0.0.6/rules_cc-0.0.6.tar.gz",
+ "http://bazel-cache.pingcap.net:8080/bazelbuild/rules_cc/releases/download/0.1.2/rules_cc-0.1.2.tar.gz",
+ "https://github.com/bazelbuild/rules_cc/releases/download/0.1.2/rules_cc-0.1.2.tar.gz",
+ "http://ats.apps.svc/bazelbuild/rules_cc/releases/download/0.1.2/rules_cc-0.1.2.tar.gz",
],
- sha256 = "3d9e271e2876ba42e114c9b9bc51454e379cbf0ec9ef9d40e2ae4cec61a31b40",
- strip_prefix = "rules_cc-0.0.6",
)
-load("@io_bazel_rules_go//go:deps.bzl", "go_download_sdk", "go_register_toolchains", "go_rules_dependencies")
+http_archive(
+ name = "rules_python",
+ sha256 = "9f9f3b300a9264e4c77999312ce663be5dee9a56e361a1f6fe7ec60e1beef9a3",
+ strip_prefix = "rules_python-1.4.1",
+ urls = [
+ "http://bazel-cache.pingcap.net:8080/bazel-contrib/rules_python/releases/download/1.4.1/rules_python-1.4.1.tar.gz",
+ "https://github.com/bazel-contrib/rules_python/releases/download/1.4.1/rules_python-1.4.1.tar.gz",
+ "http://ats.apps.svc/bazel-contrib/rules_python/releases/download/1.4.1/rules_python-1.4.1.tar.gz",
+ "https://cache.hawkingrei.com/bazel-contrib/rules_python/releases/download/1.4.1/rules_python-1.4.1.tar.gz",
+ ],
+)
+
+load("@rules_python//python:repositories.bzl", "py_repositories")
+
+py_repositories()
+
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+load("@io_bazel_rules_go//go:deps.bzl", "go_download_sdk", "go_register_toolchains", "go_rules_dependencies")
load("//:DEPS.bzl", "go_deps")
# gazelle:repository_macro DEPS.bzl%go_deps
@@ -85,24 +106,23 @@ go_download_sdk(
"https://mirrors.aliyun.com/golang/{}",
"https://dl.google.com/go/{}",
],
- version = "1.23.6",
+ version = "1.25.5",
)
+gazelle_dependencies(go_sdk = "go_sdk")
+
go_register_toolchains(
nogo = "@//build:tidb_nogo",
)
-gazelle_dependencies()
-
http_archive(
name = "com_google_protobuf",
- sha256 = "bc3dbf1f09dba1b2eb3f2f70352ee97b9049066c9040ce0c9b67fb3294e91e4b",
- strip_prefix = "protobuf-3.15.5",
+ integrity = "sha256-zl0At4RQoMpAC/NgrADA1ZnMIl8EnZhqJ+mk45bFqEo=",
+ strip_prefix = "protobuf-29.0-rc2",
# latest, as of 2021-03-08
urls = [
- "http://bazel-cache.pingcap.net:8080/gomod/rules/protobuf-3.15.5.tar.gz ",
- "https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v3.15.5.tar.gz",
- "https://github.com/protocolbuffers/protobuf/archive/v3.15.5.tar.gz",
+ "https://github.com/protocolbuffers/protobuf/archive/v29.0-rc2.tar.gz",
+ "https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v29.0-rc2.tar.gz",
],
)
@@ -114,10 +134,10 @@ http_archive(
name = "remote_java_tools",
sha256 = "f58a358ca694a41416a9b6a92b852935ad301d8882e5d22f4f11134f035317d5",
urls = [
- "http://bazel-cache.pingcap.net:8080/gomod/rules/java_tools-v12.6.zip",
- "http://ats.apps.svc/gomod/rules/java_tools-v12.6.zip",
- "https://mirror.bazel.build/bazel_java_tools/releases/java/v12.6/java_tools-v12.6.zip",
- "https://github.com/bazelbuild/java_tools/releases/download/java_v12.6/java_tools-v12.6.zip",
+ "http://bazel-cache.pingcap.net:8080/gomod/rules/java_tools-v12.6.zip",
+ "http://ats.apps.svc/gomod/rules/java_tools-v12.6.zip",
+ "https://mirror.bazel.build/bazel_java_tools/releases/java/v12.6/java_tools-v12.6.zip",
+ "https://github.com/bazelbuild/java_tools/releases/download/java_v12.6/java_tools-v12.6.zip",
],
)
@@ -125,10 +145,10 @@ http_archive(
name = "remote_java_tools_linux",
sha256 = "64294e91fe940c77e6d35818b4c3a1f07d78e33add01e330188d907032687066",
urls = [
- "http://bazel-cache.pingcap.net:8080/gomod/rules/java_tools_linux-v12.6.zip",
- "http://ats.apps.svc/gomod/rules/java_tools_linux-v12.6.zip",
- "https://mirror.bazel.build/bazel_java_tools/releases/java/v12.6/java_tools_linux-v12.6.zip",
- "https://github.com/bazelbuild/java_tools/releases/download/java_v12.6/java_tools_linux-v12.6.zip",
+ "http://bazel-cache.pingcap.net:8080/gomod/rules/java_tools_linux-v12.6.zip",
+ "http://ats.apps.svc/gomod/rules/java_tools_linux-v12.6.zip",
+ "https://mirror.bazel.build/bazel_java_tools/releases/java/v12.6/java_tools_linux-v12.6.zip",
+ "https://github.com/bazelbuild/java_tools/releases/download/java_v12.6/java_tools_linux-v12.6.zip",
],
)
diff --git a/build/image/base b/build/image/base
index 630e90088647c..c3126a48323bc 100644
--- a/build/image/base
+++ b/build/image/base
@@ -18,7 +18,7 @@ FROM quay.io/rockylinux/rockylinux:8.10.20240528
# setup mariadb repo
# ref: https://mariadb.com/docs/server/connect/clients/mariadb-client/#Linux_(Repository)
RUN curl -LsSO https://r.mariadb.com/downloads/mariadb_repo_setup \
- && echo "6083ef1974d11f49d42ae668fb9d513f7dc2c6276ffa47caed488c4b47268593 mariadb_repo_setup" | sha256sum -c - \
+ && echo "7a3e1610fee91347e198214e3672a6d3932ccbbf67905d9e892e9255baaec292 mariadb_repo_setup" | sha256sum -c - \
&& chmod +x mariadb_repo_setup \
&& ./mariadb_repo_setup \
&& rm mariadb_repo_setup
@@ -30,7 +30,7 @@ RUN --mount=type=cache,target=/var/cache/dnf \
# install golang toolchain
# renovate: datasource=docker depName=golang
-ARG GOLANG_VERSION=1.23.6
+ARG GOLANG_VERSION=1.25.5
RUN OS=linux; ARCH=$([ "$(arch)" = "x86_64" ] && echo amd64 || echo arm64); \
curl -fsSL https://dl.google.com/go/go${GOLANG_VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH /usr/local/go/bin/:$PATH
@@ -62,7 +62,7 @@ ENV PATH=$PATH:/opt/gradle-${GRADLE_VER}/bin
#### install tools: bazelisk, codecov, oras
# renovate: datasource=github-tags depName=bazelbuild/bazelisk
-ADD https://github.com/bazelbuild/bazel/releases/download/6.5.0/bazel-6.5.0-linux-x86_64 /usr/bin/bazel
+ADD https://github.com/bazelbuild/bazel/releases/download/7.7.1/bazel-7.7.1-linux-x86_64 /usr/bin/bazel
RUN chmod +x /usr/bin/bazel
# codecov tool
@@ -74,4 +74,4 @@ RUN folder=$([ "$(arch)" = "x86_64" ] && echo linux || echo aarch64); \
# oras tool
# renovate: datasource=github-tags depName=oras-project/oras
-COPY --from=bitnami/oras:1.2.0 /oras /usr/local/bin/oras
+COPY --from=public.ecr.aws/bitnami/oras:1.2.0 /oras /usr/local/bin/oras
diff --git a/build/image/centos7_jenkins b/build/image/centos7_jenkins
index a1e980f086b4c..38c1c5d9ec6f1 100644
--- a/build/image/centos7_jenkins
+++ b/build/image/centos7_jenkins
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM hub.pingcap.net/wangweizhen/base_image:go12320241009
+FROM hub.pingcap.net/wangweizhen/base_image:go12520251210
ENV GOPATH /go
ENV GOROOT /usr/local/go
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
diff --git a/build/image/parser_test b/build/image/parser_test
index dd4218a4a8f59..42e4f9d3d2967 100644
--- a/build/image/parser_test
+++ b/build/image/parser_test
@@ -14,7 +14,7 @@
FROM rockylinux:9
-ENV GOLANG_VERSION 1.23.6
+ENV GOLANG_VERSION 1.25.5
ENV ARCH amd64
ENV GOLANG_DOWNLOAD_URL https://dl.google.com/go/go$GOLANG_VERSION.linux-$ARCH.tar.gz
ENV GOPATH /home/prow/go
diff --git a/dumpling/README.md b/dumpling/README.md
index 389259ab9b733..9ad75688e79a0 100644
--- a/dumpling/README.md
+++ b/dumpling/README.md
@@ -28,8 +28,7 @@ Any questions? Let's discuss on [TiDB Internals forum](https://internals.tidb.io
Building
--------
-0. Under directory `tidb`
-1. Install Go 1.23.6 or above
+1. Under directory `tidb`
2. Run `make build_dumpling` to compile. The output is in `bin/dumpling`.
3. Run `make dumpling_unit_test` to run the unit tests.
4. Run `make dumpling_integration_test` to run integration tests. For integration test:
diff --git a/go.mod b/go.mod
index 32a2d47490f04..d7b59f562ecbc 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/pingcap/tidb
-go 1.23.6
+go 1.25.5
require (
cloud.google.com/go/kms v1.15.8
@@ -98,7 +98,7 @@ require (
github.com/prometheus/prometheus v0.50.1
github.com/qri-io/jsonschema v0.2.1
github.com/robfig/cron/v3 v3.0.1
- github.com/sasha-s/go-deadlock v0.3.5
+ github.com/sasha-s/go-deadlock v0.3.6
github.com/shirou/gopsutil/v3 v3.24.5
github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0
github.com/soheilhy/cmux v0.1.5
@@ -258,7 +258,7 @@ require (
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
github.com/ncw/directio v1.0.5 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
- github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect
+ github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1
diff --git a/go.sum b/go.sum
index b3e9b801ef12f..ecf3b8f43f094 100644
--- a/go.sum
+++ b/go.sum
@@ -591,8 +591,8 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9
github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw=
-github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
+github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE=
+github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
@@ -676,8 +676,8 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
-github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
-github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
+github.com/sasha-s/go-deadlock v0.3.6 h1:TR7sfOnZ7x00tWPfD397Peodt57KzMDo+9Ae9rMiUmw=
+github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGTqSR+rky/UXo=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
diff --git a/pkg/executor/BUILD.bazel b/pkg/executor/BUILD.bazel
index 746d27d3cee3d..2109f4faac2d2 100644
--- a/pkg/executor/BUILD.bazel
+++ b/pkg/executor/BUILD.bazel
@@ -485,7 +485,6 @@ go_test(
"//pkg/util/topsql/state",
"@com_github_docker_go_units//:go-units",
"@com_github_gorilla_mux//:mux",
- "@com_github_hashicorp_go_version//:go-version",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_pingcap_fn//:fn",
diff --git a/pkg/executor/aggfuncs/aggfuncs.go b/pkg/executor/aggfuncs/aggfuncs.go
index 0f46bcf1dbf5e..07d36a1b9732d 100644
--- a/pkg/executor/aggfuncs/aggfuncs.go
+++ b/pkg/executor/aggfuncs/aggfuncs.go
@@ -21,6 +21,7 @@ import (
"github.com/pingcap/tidb/pkg/expression/exprctx"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/chunk"
+ "github.com/pingcap/tidb/pkg/util/hack"
)
// All the AggFunc implementations are listed here for navigation.
@@ -144,7 +145,17 @@ const (
type PartialResult unsafe.Pointer
// AggPartialResultMapper contains aggregate function results
-type AggPartialResultMapper map[string][]PartialResult
+type AggPartialResultMapper = *hack.MemAwareMap[string, []PartialResult]
+
+// NewAggPartialResultMapper creates a new AggPartialResultMapper
+func NewAggPartialResultMapper() AggPartialResultMapper {
+ return NewAggPartialResultMapperWithCap(0)
+}
+
+// NewAggPartialResultMapperWithCap creates a new AggPartialResultMapper with specified capacity
+func NewAggPartialResultMapperWithCap(capacity int) AggPartialResultMapper {
+ return hack.NewMemAwareMap[string, []PartialResult](capacity)
+}
type serializer interface {
// SerializePartialResult will serialize meta data of aggregate function into bytes and put them into chunk.
diff --git a/pkg/executor/aggfuncs/func_json_objectagg.go b/pkg/executor/aggfuncs/func_json_objectagg.go
index 9e2f23a0c7f45..8bf80f3dc4d90 100644
--- a/pkg/executor/aggfuncs/func_json_objectagg.go
+++ b/pkg/executor/aggfuncs/func_json_objectagg.go
@@ -36,31 +36,28 @@ type jsonObjectAgg struct {
}
type partialResult4JsonObjectAgg struct {
- entries map[string]any
- bInMap int // indicate there are 2^bInMap buckets in entries.
+ entries hack.MemAwareMap[string, any]
}
func (*jsonObjectAgg) AllocPartialResult() (pr PartialResult, memDelta int64) {
p := partialResult4JsonObjectAgg{}
- p.entries = make(map[string]any)
- p.bInMap = 0
- return PartialResult(&p), DefPartialResult4JsonObjectAgg + (1<
(1< (1< (1< 0 {
+ if _, ok := e.groupSet.M[groupKey]; !ok {
+ if atomic.LoadUint32(&e.inSpillMode) == 1 && len(e.groupSet.M) > 0 {
sel = append(sel, j)
continue
}
@@ -846,7 +838,7 @@ func (e *HashAggExec) getNextChunk(ctx context.Context) (err error) {
}
func (e *HashAggExec) getPartialResults(groupKey string) []aggfuncs.PartialResult {
- partialResults, ok := e.partialResultMap[groupKey]
+ partialResults, ok := e.partialResultMap.M[groupKey]
allMemDelta := int64(0)
if !ok {
partialResults = make([]aggfuncs.PartialResult, 0, len(e.PartialAggFuncs))
@@ -855,13 +847,11 @@ func (e *HashAggExec) getPartialResults(groupKey string) []aggfuncs.PartialResul
partialResults = append(partialResults, partialResult)
allMemDelta += memDelta
}
- // Map will expand when count > bucketNum * loadFactor. The memory usage will doubled.
- if len(e.partialResultMap)+1 > (1< 0 {
+ e.memTracker.Consume(deltaBytes)
+ }
}
failpoint.Inject("ConsumeRandomPanic", nil)
e.memTracker.Consume(allMemDelta)
diff --git a/pkg/executor/aggregate/agg_hash_final_worker.go b/pkg/executor/aggregate/agg_hash_final_worker.go
index 5f30781b66b08..006ceab9ad75e 100644
--- a/pkg/executor/aggregate/agg_hash_final_worker.go
+++ b/pkg/executor/aggregate/agg_hash_final_worker.go
@@ -24,7 +24,6 @@ import (
"github.com/pingcap/tidb/pkg/executor/aggfuncs"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/util/chunk"
- "github.com/pingcap/tidb/pkg/util/hack"
"github.com/pingcap/tidb/pkg/util/logutil"
"go.uber.org/zap"
)
@@ -42,8 +41,7 @@ type HashAggFinalWorker struct {
baseHashAggWorker
partialResultMap aggfuncs.AggPartialResultMapper
- BInMap int
- inputCh chan *aggfuncs.AggPartialResultMapper
+ inputCh chan aggfuncs.AggPartialResultMapper
outputCh chan *AfFinalResult
finalResultHolderCh chan *chunk.Chunk
@@ -58,7 +56,7 @@ func (w *HashAggFinalWorker) getInputFromDisk(sctx sessionctx.Context) (ret aggf
return ret, restoredMem, err
}
-func (w *HashAggFinalWorker) getPartialInput() (input *aggfuncs.AggPartialResultMapper, ok bool) {
+func (w *HashAggFinalWorker) getPartialInput() (input aggfuncs.AggPartialResultMapper, ok bool) {
waitStart := time.Now()
defer updateWaitTime(w.stats, waitStart)
select {
@@ -72,30 +70,23 @@ func (w *HashAggFinalWorker) getPartialInput() (input *aggfuncs.AggPartialResult
return
}
-func (w *HashAggFinalWorker) initBInMap() {
- w.BInMap = 0
- mapLen := len(w.partialResultMap)
- for mapLen > (1< 0 {
+ w.memTracker.Consume(deltaBytes)
+ }
continue
}
@@ -112,14 +103,6 @@ func (w *HashAggFinalWorker) mergeInputIntoResultMap(sctx sessionctx.Context, in
return nil
}
-func (w *HashAggFinalWorker) handleNewGroupKey(key string, value []aggfuncs.PartialResult) {
- if len(w.partialResultMap)+1 > (1< bucketNum * loadFactor. The memory usage will double.
- if len(mapper[finalWorkerIdx])+1 > (1< 0 {
+ w.partialResultsMapMem.Add(delta)
+ w.memTracker.Consume(delta)
+ }
}
w.partialResultsMapMem.Add(allMemDelta)
w.memTracker.Consume(allMemDelta)
@@ -291,8 +285,8 @@ func (w *HashAggPartialWorker) updatePartialResult(ctx sessionctx.Context, chk *
}
func (w *HashAggPartialWorker) shuffleIntermData(finalConcurrency int) {
- for i := 0; i < finalConcurrency; i++ {
- w.outputChs[i] <- &w.partialResultsMap[i]
+ for i := range finalConcurrency {
+ w.outputChs[i] <- w.partialResultsMap[i]
}
}
@@ -332,19 +326,16 @@ func (w *HashAggPartialWorker) spillDataToDiskImpl() error {
// Clear the partialResultsMap
w.partialResultsMap = make([]aggfuncs.AggPartialResultMapper, len(w.partialResultsMap))
for i := range w.partialResultsMap {
- w.partialResultsMap[i] = make(aggfuncs.AggPartialResultMapper)
+ w.partialResultsMap[i] = aggfuncs.NewAggPartialResultMapper()
}
w.memTracker.Consume(-w.partialResultsMapMem.Load())
w.partialResultsMapMem.Store(0)
- for i := range w.BInMaps {
- w.BInMaps[i] = 0
- }
}()
w.prepareForSpill()
for _, partialResultsMap := range w.partialResultsMap {
- for key, partialResults := range partialResultsMap {
+ for key, partialResults := range partialResultsMap.M {
partitionNum := int(murmur3.Sum32(hack.Slice(key))) % spilledPartitionNum
// Spill data when tmp chunk is full
diff --git a/pkg/executor/aggregate/agg_spill.go b/pkg/executor/aggregate/agg_spill.go
index dc281420d602c..b3a007017c387 100644
--- a/pkg/executor/aggregate/agg_spill.go
+++ b/pkg/executor/aggregate/agg_spill.go
@@ -24,7 +24,6 @@ import (
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/chunk"
"github.com/pingcap/tidb/pkg/util/disk"
- "github.com/pingcap/tidb/pkg/util/hack"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/memory"
"go.uber.org/zap"
@@ -221,10 +220,8 @@ func (p *parallelHashAggSpillHelper) setError() {
}
func (p *parallelHashAggSpillHelper) restoreOnePartition(ctx sessionctx.Context) (aggfuncs.AggPartialResultMapper, int64, error) {
- restoredData := make(aggfuncs.AggPartialResultMapper)
- bInMap := 0
+ restoredData := aggfuncs.NewAggPartialResultMapper()
restoredMem := int64(0)
-
restoredPartitionIdx, isSuccess := p.getNextPartition()
if !isSuccess {
return nil, restoredMem, nil
@@ -232,7 +229,7 @@ func (p *parallelHashAggSpillHelper) restoreOnePartition(ctx sessionctx.Context)
spilledFilesIO := p.getListInDisks(restoredPartitionIdx)
for _, spilledFile := range spilledFilesIO {
- memDelta, expandMem, err := p.restoreFromOneSpillFile(ctx, &restoredData, spilledFile, &bInMap)
+ memDelta, expandMem, err := p.restoreFromOneSpillFile(ctx, restoredData, spilledFile)
if err != nil {
return nil, restoredMem, err
}
@@ -249,12 +246,11 @@ type processRowContext struct {
rowPos int
keyColPos int
aggFuncNum int
- restoreadData *aggfuncs.AggPartialResultMapper
+ restoreadData aggfuncs.AggPartialResultMapper
partialResultsRestored [][]aggfuncs.PartialResult
- bInMap *int
}
-func (p *parallelHashAggSpillHelper) restoreFromOneSpillFile(ctx sessionctx.Context, restoreadData *aggfuncs.AggPartialResultMapper, diskIO *chunk.DataInDiskByChunks, bInMap *int) (totalMemDelta int64, totalExpandMem int64, err error) {
+func (p *parallelHashAggSpillHelper) restoreFromOneSpillFile(ctx sessionctx.Context, restoreadData aggfuncs.AggPartialResultMapper, diskIO *chunk.DataInDiskByChunks) (totalMemDelta int64, totalExpandMem int64, err error) {
chunkNum := diskIO.NumChunks()
aggFuncNum := len(p.aggFuncsForRestoring)
processRowContext := &processRowContext{
@@ -265,7 +261,6 @@ func (p *parallelHashAggSpillHelper) restoreFromOneSpillFile(ctx sessionctx.Cont
aggFuncNum: aggFuncNum,
restoreadData: restoreadData,
partialResultsRestored: make([][]aggfuncs.PartialResult, aggFuncNum),
- bInMap: bInMap,
}
for i := 0; i < chunkNum; i++ {
chunk, err := diskIO.GetChunk(i)
@@ -298,7 +293,7 @@ func (p *parallelHashAggSpillHelper) restoreFromOneSpillFile(ctx sessionctx.Cont
func (p *parallelHashAggSpillHelper) processRow(context *processRowContext) (totalMemDelta int64, expandMem int64, err error) {
key := context.chunk.GetRow(context.rowPos).GetString(context.keyColPos)
- prs, ok := (*context.restoreadData)[key]
+ prs, ok := context.restoreadData.M[key]
if ok {
exprCtx := context.ctx.GetExprCtx()
// The key has appeared before, merge results.
@@ -311,17 +306,12 @@ func (p *parallelHashAggSpillHelper) processRow(context *processRowContext) (tot
}
} else {
totalMemDelta += int64(len(key))
-
- if len(*context.restoreadData)+1 > (1<<*context.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
- expandMem = hack.DefBucketMemoryUsageForMapStrToSlice * (1 << *context.bInMap)
- p.memTracker.Consume(expandMem)
- (*context.bInMap)++
- }
-
results := make([]aggfuncs.PartialResult, context.aggFuncNum)
- (*context.restoreadData)[key] = results
-
- for aggPos := 0; aggPos < context.aggFuncNum; aggPos++ {
+ delta := context.restoreadData.Set(key, results)
+ if delta > 0 {
+ p.memTracker.Consume(delta)
+ }
+ for aggPos := range context.aggFuncNum {
results[aggPos] = context.partialResultsRestored[aggPos][context.rowPos]
}
}
diff --git a/pkg/executor/benchmark_test.go b/pkg/executor/benchmark_test.go
index 0476d877aad27..c8f3a2b51114c 100644
--- a/pkg/executor/benchmark_test.go
+++ b/pkg/executor/benchmark_test.go
@@ -1922,10 +1922,10 @@ func BenchmarkAggPartialResultMapperMemoryUsage(b *testing.B) {
b.Run(fmt.Sprintf("MapRows %v", c.rowNum), func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- aggMap := make(aggfuncs.AggPartialResultMapper)
+ aggMap := aggfuncs.NewAggPartialResultMapper()
tempSlice := make([]aggfuncs.PartialResult, 10)
- for num := 0; num < c.rowNum; num++ {
- aggMap[strconv.Itoa(num)] = tempSlice
+ for num := range c.rowNum {
+ aggMap.Set(strconv.Itoa(num), tempSlice)
}
}
})
diff --git a/pkg/executor/executor_pkg_test.go b/pkg/executor/executor_pkg_test.go
index f2b095eb96dd4..ea0a573fdc6f6 100644
--- a/pkg/executor/executor_pkg_test.go
+++ b/pkg/executor/executor_pkg_test.go
@@ -16,17 +16,11 @@ package executor
import (
"fmt"
- "runtime"
- "strconv"
- "strings"
"testing"
"time"
- "unsafe"
- "github.com/hashicorp/go-version"
"github.com/pingcap/tidb/pkg/domain"
"github.com/pingcap/tidb/pkg/errctx"
- "github.com/pingcap/tidb/pkg/executor/aggfuncs"
"github.com/pingcap/tidb/pkg/executor/join"
"github.com/pingcap/tidb/pkg/kv"
"github.com/pingcap/tidb/pkg/parser/ast"
@@ -165,158 +159,6 @@ func TestSlowQueryRuntimeStats(t *testing.T) {
require.Equal(t, "initialize: 2ms, read_file: 2s, parse_log: {time:200ms, concurrency:15}, total_file: 4, read_file: 4, read_size: 2 GB", stats.String())
}
-// Test whether the actual buckets in Golang Map is same with the estimated number.
-// The test relies on the implement of Golang Map. ref https://github.com/golang/go/blob/go1.13/src/runtime/map.go#L114
-func TestAggPartialResultMapperB(t *testing.T) {
- // skip err, since we guarantee the success of execution
- go113, _ := version.NewVersion(`1.13`)
- // go version format is `gox.y.z foobar`, we only need x.y.z part
- // The following is pretty hacky, but it only in test which is ok to do so.
- actualVer, err := version.NewVersion(runtime.Version()[2:6])
- if err != nil {
- t.Fatalf("Cannot get actual go version with error %v\n", err)
- }
- if actualVer.LessThan(go113) {
- t.Fatalf("Unsupported version and should never use any version less than go1.13\n")
- }
- type testCase struct {
- rowNum int
- expectedB int
- expectedGrowing bool
- }
- var cases []testCase
- // https://github.com/golang/go/issues/63438
- // in 1.21, the load factor of map is 6 rather than 6.5 and the go team refused to backport to 1.21.
- // https://github.com/golang/go/issues/65706
- // in 1.23, it has problem.
- if strings.Contains(runtime.Version(), `go1.21`) {
- cases = []testCase{
- {
- rowNum: 0,
- expectedB: 0,
- expectedGrowing: false,
- },
- {
- rowNum: 95,
- expectedB: 4,
- expectedGrowing: false,
- },
- {
- rowNum: 10000, // 6 * (1 << 11) is 12288
- expectedB: 11,
- expectedGrowing: false,
- },
- {
- rowNum: 1000000, // 6 * (1 << 18) is 1572864
- expectedB: 18,
- expectedGrowing: false,
- },
- {
- rowNum: 786432, // 6 * (1 << 17)
- expectedB: 17,
- expectedGrowing: false,
- },
- {
- rowNum: 786433, // 6 * (1 << 17) + 1
- expectedB: 18,
- expectedGrowing: true,
- },
- {
- rowNum: 393216, // 6 * (1 << 16)
- expectedB: 16,
- expectedGrowing: false,
- },
- {
- rowNum: 393217, // 6 * (1 << 16) + 1
- expectedB: 17,
- expectedGrowing: true,
- },
- }
- } else {
- cases = []testCase{
- {
- rowNum: 0,
- expectedB: 0,
- expectedGrowing: false,
- },
- {
- rowNum: 100,
- expectedB: 4,
- expectedGrowing: false,
- },
- {
- rowNum: 10000,
- expectedB: 11,
- expectedGrowing: false,
- },
- {
- rowNum: 1000000,
- expectedB: 18,
- expectedGrowing: false,
- },
- {
- rowNum: 851968, // 6.5 * (1 << 17)
- expectedB: 17,
- expectedGrowing: false,
- },
- {
- rowNum: 851969, // 6.5 * (1 << 17) + 1
- expectedB: 18,
- expectedGrowing: true,
- },
- {
- rowNum: 425984, // 6.5 * (1 << 16)
- expectedB: 16,
- expectedGrowing: false,
- },
- {
- rowNum: 425985, // 6.5 * (1 << 16) + 1
- expectedB: 17,
- expectedGrowing: true,
- },
- }
- }
-
- for _, tc := range cases {
- aggMap := make(aggfuncs.AggPartialResultMapper)
- tempSlice := make([]aggfuncs.PartialResult, 10)
- for num := 0; num < tc.rowNum; num++ {
- aggMap[strconv.Itoa(num)] = tempSlice
- }
-
- require.Equal(t, tc.expectedB, getB(aggMap))
- require.Equal(t, tc.expectedGrowing, getGrowing(aggMap))
- }
-}
-
-// A header for a Go map.
-// nolint:structcheck
-type hmap struct {
- // Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
- // Make sure this stays in sync with the compiler's definition.
- count int // nolint:unused // # live cells == size of map. Must be first (used by len() builtin)
- flags uint8 // nolint:unused
- B uint8 // nolint:unused // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
- noverflow uint16 // nolint:unused // approximate number of overflow buckets; see incrnoverflow for details
- hash0 uint32 // nolint:unused // hash seed
-
- buckets unsafe.Pointer // nolint:unused // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // nolint:unused // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // nolint:unused // progress counter for evacuation (buckets less than this have been evacuated)
-}
-
-func getB(m aggfuncs.AggPartialResultMapper) int {
- point := (**hmap)(unsafe.Pointer(&m))
- value := *point
- return int(value.B)
-}
-
-func getGrowing(m aggfuncs.AggPartialResultMapper) bool {
- point := (**hmap)(unsafe.Pointer(&m))
- value := *point
- return value.oldbuckets != nil
-}
-
func TestFilterTemporaryTableKeys(t *testing.T) {
vars := variable.NewSessionVars(nil)
const tableID int64 = 3
diff --git a/pkg/executor/index_merge_reader.go b/pkg/executor/index_merge_reader.go
index 57d2d4c0bce9a..da546ea33e6c1 100644
--- a/pkg/executor/index_merge_reader.go
+++ b/pkg/executor/index_merge_reader.go
@@ -780,7 +780,7 @@ func (e *IndexMergeReaderExecutor) startIndexMergeTableScanWorker(ctx context.Co
}
ctx1, cancel := context.WithCancel(ctx)
go func() {
- defer trace.StartRegion(ctx, "IndexMergeTableScanWorker").End()
+ defer trace.StartRegion(ctx, tableScanWorkerType).End()
var task *indexMergeTableTask
util.WithRecovery(
// Note we use the address of `task` as the argument of both `pickAndExecTask` and `handleTableScanWorkerPanic`
diff --git a/pkg/executor/join/BUILD.bazel b/pkg/executor/join/BUILD.bazel
index fc03251c9a745..b17eda9b257ac 100644
--- a/pkg/executor/join/BUILD.bazel
+++ b/pkg/executor/join/BUILD.bazel
@@ -114,7 +114,6 @@ go_test(
"//pkg/util/chunk",
"//pkg/util/codec",
"//pkg/util/disk",
- "//pkg/util/hack",
"//pkg/util/memory",
"//pkg/util/mock",
"//pkg/util/sqlkiller",
diff --git a/pkg/executor/join/concurrent_map.go b/pkg/executor/join/concurrent_map.go
index 0c7d74a287b05..d38246f30481b 100644
--- a/pkg/executor/join/concurrent_map.go
+++ b/pkg/executor/join/concurrent_map.go
@@ -20,7 +20,7 @@ import (
)
// ShardCount controls the shard maps within the concurrent map
-var ShardCount = 320
+const ShardCount = 320
// A "thread" safe map of type string:Anything.
// To avoid lock bottlenecks this map is dived to several (ShardCount) map shards.
@@ -28,16 +28,16 @@ type concurrentMap []*concurrentMapShared
// A "thread" safe string to anything map.
type concurrentMapShared struct {
- items map[uint64]*entry
- syncutil.RWMutex // Read Write mutex, guards access to internal map.
- bInMap int64 // indicate there are 2^bInMap buckets in items
+ items hack.MemAwareMap[uint64, *entry]
+ syncutil.RWMutex // Read Write mutex, guards access to internal map.
}
// newConcurrentMap creates a new concurrent map.
func newConcurrentMap() concurrentMap {
m := make(concurrentMap, ShardCount)
- for i := 0; i < ShardCount; i++ {
- m[i] = &concurrentMapShared{items: make(map[uint64]*entry), bInMap: 0}
+ for i := range ShardCount {
+ m[i] = &concurrentMapShared{}
+ m[i].items.Init(make(map[uint64]*entry))
}
return m
}
@@ -51,34 +51,13 @@ func (m concurrentMap) getShard(hashKey uint64) *concurrentMapShared {
func (m concurrentMap) Insert(key uint64, value *entry) (memDelta int64) {
shard := m.getShard(key)
shard.Lock()
- oldValue := shard.items[key]
+ oldValue := shard.items.M[key]
value.Next = oldValue
- shard.items[key] = value
- if len(shard.items) > (1< (1<= 1 and b = 2 and c = 3 and d = 4")
- statsTbl, ok = h.Get(tblInfo.ID)
- require.True(t, ok)
+ require.Eventually(t, func() bool {
+ statsTbl, ok = h.Get(tblInfo.ID)
+ require.True(t, ok)
+ return statsTbl.ColNum() == 3
+ }, 5*time.Second, 100*time.Millisecond)
require.True(t, statsTbl.GetCol(tblInfo.Columns[0].ID).IsFullLoad())
require.True(t, statsTbl.GetCol(tblInfo.Columns[1].ID).IsFullLoad())
require.True(t, statsTbl.GetCol(tblInfo.Columns[3].ID).IsFullLoad())
@@ -465,8 +468,11 @@ func TestSyncLoadOnObjectWhichCanNotFoundInStorage(t *testing.T) {
tk.MustExec("analyze table t columns a, b, c")
require.NoError(t, h.InitStatsLite(context.TODO()))
tk.MustExec("select * from t where a >= 1 and b = 2 and c = 3 and d = 4")
- statsTbl, ok = h.Get(tblInfo.ID)
- require.True(t, ok)
+ require.Eventually(t, func() bool {
+ statsTbl, ok = h.Get(tblInfo.ID)
+ require.True(t, ok)
+ return statsTbl.ColNum() == 4
+ }, 5*time.Second, 100*time.Millisecond)
// a, b, d's status is not changed.
require.True(t, statsTbl.GetCol(tblInfo.Columns[0].ID).IsFullLoad())
require.True(t, statsTbl.GetCol(tblInfo.Columns[1].ID).IsFullLoad())
diff --git a/pkg/testkit/testdata/testdata.go b/pkg/testkit/testdata/testdata.go
index 4453e257b930b..ed70e80c9f1ac 100644
--- a/pkg/testkit/testdata/testdata.go
+++ b/pkg/testkit/testdata/testdata.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !codes
-// +build !codes
package testdata
diff --git a/pkg/util/cgroup/BUILD.bazel b/pkg/util/cgroup/BUILD.bazel
index 11b5c8abb7082..b094eddbdea0e 100644
--- a/pkg/util/cgroup/BUILD.bazel
+++ b/pkg/util/cgroup/BUILD.bazel
@@ -49,9 +49,15 @@ go_library(
"@io_bazel_rules_go//go/platform:openbsd": [
"@com_github_pingcap_failpoint//:failpoint",
],
+ "@io_bazel_rules_go//go/platform:osx": [
+ "@com_github_pingcap_failpoint//:failpoint",
+ ],
"@io_bazel_rules_go//go/platform:plan9": [
"@com_github_pingcap_failpoint//:failpoint",
],
+ "@io_bazel_rules_go//go/platform:qnx": [
+ "@com_github_pingcap_failpoint//:failpoint",
+ ],
"@io_bazel_rules_go//go/platform:solaris": [
"@com_github_pingcap_failpoint//:failpoint",
],
diff --git a/pkg/util/hack/BUILD.bazel b/pkg/util/hack/BUILD.bazel
index 9a21b47867c2c..bbd6caefd3dd4 100644
--- a/pkg/util/hack/BUILD.bazel
+++ b/pkg/util/hack/BUILD.bazel
@@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "hack",
- srcs = ["hack.go"],
+ srcs = [
+ "hack.go",
+ "map_abi.go",
+ ],
importpath = "github.com/pingcap/tidb/pkg/util/hack",
visibility = ["//visibility:public"],
)
@@ -13,11 +16,13 @@ go_test(
srcs = [
"hack_test.go",
"main_test.go",
+ "map_abi_test.go",
],
embed = [":hack"],
flaky = True,
deps = [
"//pkg/testkit/testsetup",
+ "@com_github_stretchr_testify//require",
"@org_uber_go_goleak//:goleak",
],
)
diff --git a/pkg/util/hack/hack.go b/pkg/util/hack/hack.go
index 34a01ae6360cf..de5ec8a30bcf9 100644
--- a/pkg/util/hack/hack.go
+++ b/pkg/util/hack/hack.go
@@ -15,8 +15,6 @@
package hack
import (
- "runtime"
- "strings"
"unsafe"
)
@@ -39,52 +37,19 @@ func Slice(s string) []byte {
return unsafe.Slice(unsafe.StringData(s), len(s))
}
-// LoadFactor is the maximum average load of a bucket that triggers growth is 6.5 in Golang Map.
-// Represent as LoadFactorNum/LoadFactorDen, to allow integer math.
-// They are from the golang definition. ref: https://github.com/golang/go/blob/go1.13.15/src/runtime/map.go#L68-L71
-const (
- // LoadFactorDen is the denominator of load factor
- LoadFactorDen = 2
-)
-
-// LoadFactorNum is the numerator of load factor
-var LoadFactorNum = 13
-
func init() {
- // In go1.21, the load factor num becomes 12 and go team has decided not to backport the fix to 1.21.
- // See more details in https://github.com/golang/go/issues/63438
- if strings.Contains(runtime.Version(), `go1.21`) || strings.Contains(runtime.Version(), `go1.22`) {
- LoadFactorNum = 12
- }
-}
-
-const (
- // DefBucketMemoryUsageForMapStrToSlice = bucketSize*(1+unsafe.Sizeof(string) + unsafe.Sizeof(slice))+2*ptrSize
- // ref https://github.com/golang/go/blob/go1.15.6/src/reflect/type.go#L2162.
- // The bucket size may be changed by golang implement in the future.
- // Golang Map needs to acquire double the memory when expanding,
- // and the old buckets will be released after the data is migrated.
- // Considering the worst case, the data in the old bucket cannot be migrated in time, and the old bucket cannot
- // be GCed, we expand the bucket size to 1.5 times to estimate the memory usage of Golang Map.
- DefBucketMemoryUsageForMapStrToSlice = (8*(1+16+24) + 16) / 2 * 3
- // DefBucketMemoryUsageForMapIntToPtr = bucketSize*(1+unsafe.Sizeof(uint64) + unsafe.Sizeof(pointer))+2*ptrSize
- DefBucketMemoryUsageForMapIntToPtr = (8*(1+8+8) + 16) / 2 * 3
- // DefBucketMemoryUsageForMapStringToAny = bucketSize*(1+unsafe.Sizeof(string) + unsafe.Sizeof(interface{}))+2*ptrSize
- DefBucketMemoryUsageForMapStringToAny = (8*(1+16+16) + 16) / 2 * 3
- // DefBucketMemoryUsageForSetString = bucketSize*(1+unsafe.Sizeof(string) + unsafe.Sizeof(struct{}))+2*ptrSize
- DefBucketMemoryUsageForSetString = (8*(1+16+0) + 16) / 2 * 3
- // DefBucketMemoryUsageForSetFloat64 = bucketSize*(1+unsafe.Sizeof(float64) + unsafe.Sizeof(struct{}))+2*ptrSize
- DefBucketMemoryUsageForSetFloat64 = (8*(1+8+0) + 16) / 2 * 3
- // DefBucketMemoryUsageForSetInt64 = bucketSize*(1+unsafe.Sizeof(int64) + unsafe.Sizeof(struct{}))+2*ptrSize
- DefBucketMemoryUsageForSetInt64 = (8*(1+8+0) + 16) / 2 * 3
-)
-
-// EstimateBucketMemoryUsage returns the estimated memory usage of a bucket in a map.
-func EstimateBucketMemoryUsage[K comparable, V any]() uint64 {
- return (8*(1+uint64(unsafe.Sizeof(*new(K))+unsafe.Sizeof(*new(V)))) + 16) / 2 * 3
+ checkMapABI()
}
// GetBytesFromPtr return a bytes array from the given ptr and length
func GetBytesFromPtr(ptr unsafe.Pointer, length int) []byte {
return unsafe.Slice((*byte)(ptr), length)
}
+
+// Memory usage constants for swiss map
+const (
+ DefBucketMemoryUsageForMapStringToAny = 312
+ DefBucketMemoryUsageForSetString = 248
+ DefBucketMemoryUsageForSetFloat64 = 184
+ DefBucketMemoryUsageForSetInt64 = 184
+)
diff --git a/pkg/util/hack/map_abi.go b/pkg/util/hack/map_abi.go
new file mode 100644
index 0000000000000..170ce960fd863
--- /dev/null
+++ b/pkg/util/hack/map_abi.go
@@ -0,0 +1,424 @@
+// Copyright 2025 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hack
+
+import (
+ "runtime"
+ "strings"
+ "unsafe"
+)
+
+// Maximum size of a table before it is split at the directory level.
+const maxTableCapacity = 1024
+
+// Number of bits in the group.slot count.
+const swissMapGroupSlotsBits = 3
+
+// Number of slots in a group.
+const swissMapGroupSlots = 1 << swissMapGroupSlotsBits // 8
+
+// $GOROOT/src/internal/runtime/maps/table.go:`type table struct`
+type swissMapTable struct {
+ // The number of filled slots (i.e. the number of elements in the table).
+ used uint16
+
+ // The total number of slots (always 2^N). Equal to
+ // `(groups.lengthMask+1)*abi.SwissMapGroupSlots`.
+ capacity uint16
+
+ // The number of slots we can still fill without needing to rehash.
+ //
+ // We rehash when used + tombstones > loadFactor*capacity, including
+ // tombstones so the table doesn't overfill with tombstones. This field
+ // counts down remaining empty slots before the next rehash.
+ growthLeft uint16
+
+ // The number of bits used by directory lookups above this table. Note
+ // that this may be less then globalDepth, if the directory has grown
+ // but this table has not yet been split.
+ localDepth uint8
+
+ // Index of this table in the Map directory. This is the index of the
+ // _first_ location in the directory. The table may occur in multiple
+ // sequential indices.
+ //
+ // index is -1 if the table is stale (no longer installed in the
+ // directory).
+ index int
+
+ // groups is an array of slot groups. Each group holds abi.SwissMapGroupSlots
+ // key/elem slots and their control bytes. A table has a fixed size
+ // groups array. The table is replaced (in rehash) when more space is
+ // required.
+ //
+ // TODO(prattmic): keys and elements are interleaved to maximize
+ // locality, but it comes at the expense of wasted space for some types
+ // (consider uint8 key, uint64 element). Consider placing all keys
+ // together in these cases to save space.
+ groups groupsReference
+}
+
+// groupsReference is a wrapper type describing an array of groups stored at
+// data.
+type groupsReference struct {
+ // data points to an array of groups. See groupReference above for the
+ // definition of group.
+ data unsafe.Pointer // data *[length]typ.Group
+
+ // lengthMask is the number of groups in data minus one (note that
+ // length must be a power of two). This allows computing i%length
+ // quickly using bitwise AND.
+ lengthMask uint64
+}
+
+// $GOROOT/src/internal/runtime/maps/map.go:`type Map struct`
+type swissMap struct {
+ // The number of filled slots (i.e. the number of elements in all
+ // tables). Excludes deleted slots.
+ // Must be first (known by the compiler, for len() builtin).
+ Used uint64
+
+ // seed is the hash seed, computed as a unique random number per map.
+ seed uintptr
+
+ // The directory of tables.
+ //
+ // Normally dirPtr points to an array of table pointers
+ //
+ // dirPtr *[dirLen]*table
+ //
+ // The length (dirLen) of this array is `1 << globalDepth`. Multiple
+ // entries may point to the same table. See top-level comment for more
+ // details.
+ //
+ // Small map optimization: if the map always contained
+ // abi.SwissMapGroupSlots or fewer entries, it fits entirely in a
+ // single group. In that case dirPtr points directly to a single group.
+ //
+ // dirPtr *group
+ //
+ // In this case, dirLen is 0. used counts the number of used slots in
+ // the group. Note that small maps never have deleted slots (as there
+ // is no probe sequence to maintain).
+ dirPtr unsafe.Pointer
+ dirLen int
+
+ // The number of bits to use in table directory lookups.
+ globalDepth uint8
+
+ // The number of bits to shift out of the hash for directory lookups.
+ // On 64-bit systems, this is 64 - globalDepth.
+ globalShift uint8
+
+ // writing is a flag that is toggled (XOR 1) while the map is being
+ // written. Normally it is set to 1 when writing, but if there are
+ // multiple concurrent writers, then toggling increases the probability
+ // that both sides will detect the race.
+ writing uint8
+
+ // tombstonePossible is false if we know that no table in this map
+ // contains a tombstone.
+ tombstonePossible bool
+
+ // clearSeq is a sequence counter of calls to Clear. It is used to
+ // detect map clears during iteration.
+ clearSeq uint64
+}
+
+func (m *swissMap) directoryAt(i uintptr) *swissMapTable {
+ return *(**swissMapTable)(unsafe.Pointer(uintptr(m.dirPtr) + uintptr(sizeofPtr)*i))
+}
+
+// Size returns the accurate memory size of the swissMap including all its tables.
+func (m *swissMap) Size(groupSize uint64) (sz uint64) {
+ sz += swissMapSize
+ sz += sizeofPtr * uint64(m.dirLen)
+ if m.dirLen == 0 {
+ sz += groupSize
+ return
+ }
+
+ var lastTab *swissMapTable
+ for i := range m.dirLen {
+ t := m.directoryAt(uintptr(i))
+ if t == lastTab {
+ continue
+ }
+ lastTab = t
+ sz += swissTableSize
+ sz += groupSize * (t.groups.lengthMask + 1)
+ }
+ return
+}
+
+// Cap returns the total capacity of the swissMap.
+func (m *swissMap) Cap() uint64 {
+ if m.dirLen == 0 {
+ return swissMapGroupSlots
+ }
+ var capacity uint64
+ var lastTab *swissMapTable
+ for i := range m.dirLen {
+ t := m.directoryAt(uintptr(i))
+ if t == lastTab {
+ continue
+ }
+ lastTab = t
+ capacity += uint64(t.capacity)
+ }
+ return capacity
+}
+
+// Size returns the accurate memory size of the wrapped map, including all of its tables.
+func (m *SwissMapWrap) Size() uint64 {
+ return m.Data.Size(uint64(m.Type.GroupSize))
+}
+
+const (
+ swissMapSize = uint64(unsafe.Sizeof(swissMap{}))
+ swissTableSize = uint64(unsafe.Sizeof(swissMapTable{}))
+ sizeofPtr = uint64(unsafe.Sizeof(uintptr(0)))
+)
+
+// TODO: use a more accurate size calculation if necessary
+func approxSize(groupSize uint64, maxLen uint64) (size uint64) {
+ // 204 can fit the `split`/`rehash` behavior of different kinds of swisstable
+ const ratio = 204
+ return groupSize * maxLen * ratio / 1000
+}
+
+type ctrlGroup uint64
+
+type groupReference struct {
+ // data points to the group, which is described by typ.Group and has
+ // layout:
+ //
+ // type group struct {
+ // ctrls ctrlGroup
+ // slots [abi.SwissMapGroupSlots]slot
+ // }
+ //
+ // type slot struct {
+ // key typ.Key
+ // elem typ.Elem
+ // }
+ data unsafe.Pointer // data *typ.Group
+}
+
+func (g *groupsReference) group(typ *swissMapType, i uint64) groupReference {
+ // TODO(prattmic): Do something here about truncation on cast to
+ // uintptr on 32-bit systems?
+ offset := uintptr(i) * typ.GroupSize
+
+ return groupReference{
+ data: unsafe.Pointer(uintptr(g.data) + offset),
+ }
+}
+
+// $GOROOT/src/internal/abi/type.go:`type Type struct`
+type abiType struct {
+ Size uintptr
+ PtrBytes uintptr // number of (prefix) bytes in the type that can contain pointers
+ Hash uint32 // hash of type; avoids computation in hash tables
+ TFlag uint8 // extra type information flags
+ Align uint8 // alignment of variable with this type
+ FieldAlign uint8 // alignment of struct field with this type
+ Kind uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ Equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // GCData stores the GC type data for the garbage collector.
+ // Normally, GCData points to a bitmask that describes the
+ // ptr/nonptr fields of the type. The bitmask will have at
+ // least PtrBytes/ptrSize bits.
+ // If the TFlagGCMaskOnDemand bit is set, GCData is instead a
+ // **byte and the pointer to the bitmask is one dereference away.
+ // The runtime will build the bitmask if needed.
+ // (See runtime/type.go:getGCMask.)
+ // Note: multiple types may have the same value of GCData,
+ // including when TFlagGCMaskOnDemand is set. The types will, of course,
+ // have the same pointer layout (but not necessarily the same size).
+ GCData *byte
+ Str int32 // string form
+ PtrToThis int32 // type for pointer to this type, may be zero
+}
+
+// $GOROOT/src/internal/abi/map_swiss.go:`type SwissMapType struct`
+type swissMapType struct {
+ abiType
+ Key *abiType
+ Elem *abiType
+ Group *abiType // internal type representing a slot group
+ // function for hashing keys (ptr to key, seed) -> hash
+ Hasher func(unsafe.Pointer, uintptr) uintptr
+ GroupSize uintptr // == Group.Size_
+ SlotSize uintptr // size of key/elem slot
+ ElemOff uintptr // offset of elem in key/elem slot; aka key size; elem size: SlotSize - ElemOff;
+ Flags uint32
+}
+
+// SwissMapWrap is a wrapper of map to access its internal structure.
+type SwissMapWrap struct {
+ Type *swissMapType
+ Data *swissMap
+}
+
+// ToSwissMap converts a map to SwissMapWrap.
+func ToSwissMap[K comparable, V any](m map[K]V) (sm SwissMapWrap) {
+ ref := any(m)
+ sm = *(*SwissMapWrap)(unsafe.Pointer(&ref))
+ return
+}
+
+const (
+ ctrlGroupsSize = unsafe.Sizeof(ctrlGroup(0))
+ groupSlotsOffset = ctrlGroupsSize
+)
+
+func (g *groupReference) cap(typ *swissMapType) uint64 {
+ _ = g
+ return groupCap(uint64(typ.GroupSize), uint64(typ.SlotSize))
+}
+
+func groupCap(groupSize, slotSize uint64) uint64 {
+ return (groupSize - uint64(groupSlotsOffset)) / slotSize
+}
+
+// key returns a pointer to the key at index i.
+func (g *groupReference) key(typ *swissMapType, i uintptr) unsafe.Pointer {
+ offset := groupSlotsOffset + i*typ.SlotSize
+ return unsafe.Pointer(uintptr(g.data) + offset)
+}
+
+// elem returns a pointer to the element at index i.
+func (g *groupReference) elem(typ *swissMapType, i uintptr) unsafe.Pointer {
+ offset := groupSlotsOffset + i*typ.SlotSize + typ.ElemOff
+ return unsafe.Pointer(uintptr(g.data) + offset)
+}
+
+// MemAwareMap is a map with memory usage tracking.
+type MemAwareMap[K comparable, V any] struct {
+ M map[K]V
+ groupSize uint64
+ nextCheckpoint uint64 // every `maxTableCapacity` increase in Used
+ Bytes uint64
+}
+
+// MockSeedForTest sets the seed of the swissMap inside MemAwareMap
+func (m *MemAwareMap[K, V]) MockSeedForTest(seed uint64) (oriSeed uint64) {
+ return m.unwrap().MockSeedForTest(seed)
+}
+
+// MockSeedForTest sets the seed of the swissMap
+func (m *swissMap) MockSeedForTest(seed uint64) (oriSeed uint64) {
+ if m.Used != 0 {
+ panic("MockSeedForTest can only be called on empty map")
+ }
+ oriSeed = uint64(m.seed)
+ m.seed = uintptr(seed)
+ return
+}
+
+// Count returns the number of elements in the map.
+func (m *MemAwareMap[K, V]) Count() int {
+ return len(m.M)
+}
+
+// Empty returns true if the map is empty.
+func (m *MemAwareMap[K, V]) Empty() bool {
+ return len(m.M) == 0
+}
+
+// Exist returns true if the key exists in the map.
+func (m *MemAwareMap[K, V]) Exist(val K) bool {
+ _, ok := m.M[val]
+ return ok
+}
+
+func (m *MemAwareMap[K, V]) unwrap() *swissMap {
+ return *(**swissMap)(unsafe.Pointer(&m.M))
+}
+
+// Set sets the value for the key in the map and returns the memory delta.
+func (m *MemAwareMap[K, V]) Set(key K, value V) (deltaBytes int64) {
+ sm := m.unwrap()
+ m.M[key] = value
+ if sm.Used >= m.nextCheckpoint {
+ newBytes := max(m.Bytes, approxSize(m.groupSize, sm.Used))
+ deltaBytes = int64(newBytes) - int64(m.Bytes)
+ m.Bytes = newBytes
+ m.nextCheckpoint = min(sm.Used, maxTableCapacity) + sm.Used
+ }
+ return
+}
+
+// SetExt sets the value for the key in the map and returns the memory delta and whether it's an insert.
+func (m *MemAwareMap[K, V]) SetExt(key K, value V) (deltaBytes int64, insert bool) {
+ sm := m.unwrap()
+ oriUsed := sm.Used
+ deltaBytes = m.Set(key, value)
+ insert = oriUsed != sm.Used
+ return
+}
+
+// Init initializes the MemAwareMap with the given map and returns the initial memory size.
+// The input map should NOT be nil.
+func (m *MemAwareMap[K, V]) Init(v map[K]V) int64 {
+ if v == nil {
+ panic("MemAwareMap.Init: input map should NOT be nil")
+ }
+ m.M = v
+ sm := m.unwrap()
+
+ m.groupSize = uint64(ToSwissMap(m.M).Type.GroupSize)
+ m.Bytes = sm.Size(m.groupSize)
+ if sm.Used <= swissMapGroupSlots {
+ m.nextCheckpoint = swissMapGroupSlots * 2
+ } else {
+ m.nextCheckpoint = min(sm.Used, maxTableCapacity) + sm.Used
+ }
+ return int64(m.Bytes)
+}
+
+// NewMemAwareMap creates a new MemAwareMap with the given initial capacity.
+func NewMemAwareMap[K comparable, V any](capacity int) *MemAwareMap[K, V] {
+ m := new(MemAwareMap[K, V])
+ m.Init(make(map[K]V, capacity))
+ return m
+}
+
+// RealBytes returns the real memory size of the map.
+// Compute the real size is expensive, so do not call it frequently.
+// Make sure the `seed` is same when testing the memory size.
+func (m *MemAwareMap[K, V]) RealBytes() uint64 {
+ return m.unwrap().Size(m.groupSize)
+}
+
+func checkMapABI() {
+ if !strings.Contains(runtime.Version(), `go1.25`) {
+ panic("The hack package only supports go1.25, please confirm the correctness of the ABI before upgrading")
+ }
+}
+
+// Get the value of the key.
+func (m *MemAwareMap[K, V]) Get(k K) (v V, ok bool) {
+ v, ok = m.M[k]
+ return
+}
+
+// Len returns the number of elements in the map.
+func (m *MemAwareMap[K, V]) Len() int {
+ return len(m.M)
+}
diff --git a/pkg/util/hack/map_abi_test.go b/pkg/util/hack/map_abi_test.go
new file mode 100644
index 0000000000000..51692c34f48e1
--- /dev/null
+++ b/pkg/util/hack/map_abi_test.go
@@ -0,0 +1,237 @@
+// Copyright 2025 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hack
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+const seed = 4992862800126241206 // set the fixed seed for test
+
+func TestSwissTable(t *testing.T) {
+ require.True(t, maxTableCapacity == 1024)
+ {
+ m := make(map[int]int)
+ tp := ToSwissMap(m).Type
+ require.True(t, tp.GroupSize == 136)
+ require.True(t, tp.SlotSize == 16)
+ require.True(t, tp.ElemOff == 8)
+ }
+ {
+ m := make(map[int32]int32)
+ tp := ToSwissMap(m).Type
+ require.True(t, tp.GroupSize == 72)
+ require.True(t, tp.SlotSize == 8)
+ require.True(t, tp.ElemOff == 4)
+ }
+ {
+ m := make(map[int8]int8)
+ tp := ToSwissMap(m).Type
+ require.True(t, tp.GroupSize == 24)
+ require.True(t, tp.SlotSize == 2)
+ require.True(t, tp.ElemOff == 1)
+ }
+ {
+ m := make(map[int64]float64)
+ tp := ToSwissMap(m).Type
+ require.True(t, tp.GroupSize == 136)
+ require.True(t, tp.SlotSize == 16)
+ require.True(t, tp.ElemOff == 8)
+ }
+ {
+ m := make(map[complex128]complex128)
+ tp := ToSwissMap(m).Type
+ require.True(t, tp.GroupSize == 264)
+ require.True(t, tp.SlotSize == 32)
+ require.True(t, tp.ElemOff == 16)
+ }
+ {
+ const N = 1024
+ mp := make(map[uint64]uint64)
+ sm := ToSwissMap(mp)
+ sm.Data.MockSeedForTest(seed)
+ mp[1234] = 5678
+ for i := range N {
+ mp[uint64(i)] = uint64(i * 2)
+ }
+ require.Equal(t, N+1, len(mp))
+ require.Equal(t, uint64(N+1), sm.Data.Used)
+ found := false
+ var lastTab *swissMapTable
+
+ for i := range sm.Data.dirLen {
+ table := sm.Data.directoryAt(uintptr(i))
+ if table == lastTab {
+ continue
+ }
+ for i := range table.groups.lengthMask + 1 {
+ ref := table.groups.group(sm.Type, i)
+ require.True(t, (sm.Type.GroupSize-groupSlotsOffset)%sm.Type.SlotSize == 0)
+ capacity := ref.cap(sm.Type)
+ require.True(t, capacity == swissMapGroupSlots)
+ for j := range capacity {
+ k, v := *(*uint64)(ref.key(sm.Type, uintptr(j))), *(*uint64)(ref.elem(sm.Type, uintptr(j)))
+ if k == 1234 && v == 5678 {
+ require.False(t, found)
+ found = true
+ break
+ }
+ }
+ }
+ }
+ require.True(t, found)
+
+ oriSeed := sm.Data.seed
+ for k := range mp {
+ delete(mp, k)
+ }
+ require.True(t, oriSeed != sm.Data.seed)
+ }
+ {
+ const N = 2000
+ mp := make(map[string]int)
+ sm := ToSwissMap(mp)
+ sm.Data.MockSeedForTest(seed)
+ for i := range N {
+ mp[fmt.Sprintf("key-%d", i)] = i
+ }
+ require.Equal(t, N, len(mp))
+ require.Equal(t, N, int(sm.Data.Used))
+ require.True(t, sm.Type.GroupSize == 200)
+ require.True(t, sm.Data.dirLen == 4)
+ require.Equal(t, 102608, int(sm.Size()))
+ }
+ {
+ mp := make(map[int]int)
+ require.Equal(t, 0, len(mp))
+ sm := ToSwissMap(mp)
+ sm.Data.MockSeedForTest(seed)
+ require.Equal(t, 0, int(sm.Data.Used))
+ require.True(t, sm.Type.GroupSize == 136)
+ require.Equal(t, 184, int(sm.Size()))
+ for i := range 8 {
+ mp[i] = i
+ }
+ require.Equal(t, 8, len(mp))
+ require.Equal(t, 184, int(sm.Size()))
+ mp[9] = 9
+ require.Equal(t, 9, len(mp))
+ require.Equal(t, 360, int(sm.Size()))
+ }
+
+ {
+ mp := make(map[complex128]complex128)
+ m := MemAwareMap[complex128, complex128]{}
+ const N = 1024*50 - 1
+ delta := m.Init(mp)
+ m.MockSeedForTest(seed)
+ for i := range N {
+ k := complex(float64(i), float64(i))
+ d := m.Set(k, k)
+ delta += d
+ if d > 0 {
+ sz := m.RealBytes()
+ expMin := sz * 75 / 100
+ require.True(t, m.Bytes >= expMin, "ApproxSize %d, RealSize %d, index %d, expMin %d", m.Bytes, sz, i, expMin)
+ require.True(t, approxSize(m.groupSize, uint64(m.Len())) >= expMin, "ApproxSize %d, RealSize %d, index %d, expMin %d", m.Bytes, sz, i, expMin)
+ }
+ }
+ sz := m.RealBytes()
+ require.True(t, sz == 2165296, sz)
+ require.True(t, delta == 2702278, delta)
+ require.True(t, delta == int64(m.Bytes))
+ require.True(t, seed == m.unwrap().seed)
+ clearSeq := m.unwrap().clearSeq
+ clear(m.M)
+ require.True(t, m.Len() == 0)
+ require.True(t, clearSeq+1 == m.unwrap().clearSeq)
+ require.True(t, m.unwrap().seed != seed)
+ require.True(t, sz == m.RealBytes())
+ require.True(t, delta == int64(m.Bytes))
+
+ m.MockSeedForTest(seed)
+ for i := range 1024 {
+ k := complex(float64(i), float64(i))
+ d, insert := m.SetExt(k, k)
+ require.True(t, d == 0)
+ require.True(t, insert)
+ }
+ require.True(t, m.Len() == 1024)
+ }
+}
+
+var result int
+
+var inputs = []struct {
+ input int
+}{
+ {input: 1},
+ {input: 100},
+ {input: 10000},
+ {input: 1000000},
+}
+
+func memAwareIntMap(size int) int {
+ var x int
+ m := NewMemAwareMap[int, int](0)
+ for j := range size {
+ m.Set(j, j)
+ }
+ for j := range size {
+ x, _ = m.Get(j)
+ }
+ return x
+}
+
+func nativeIntMap(size int) int {
+ var x int
+ m := make(map[int]int)
+ for j := range size {
+ m[j] = j
+ }
+
+ for j := range size {
+ x = m[j]
+ }
+ return x
+}
+
+func BenchmarkMemAwareIntMap(b *testing.B) {
+ for _, s := range inputs {
+ b.Run("MemAwareIntMap_"+strconv.Itoa(s.input), func(b *testing.B) {
+ var x int
+ for b.Loop() {
+ x = memAwareIntMap(s.input)
+ }
+ result = x
+ })
+ }
+}
+
+func BenchmarkNativeIntMap(b *testing.B) {
+ for _, s := range inputs {
+ b.Run("NativeIntMap_"+strconv.Itoa(s.input), func(b *testing.B) {
+ var x int
+ for b.Loop() {
+ x = nativeIntMap(s.input)
+ }
+ result = x
+ })
+ }
+}
diff --git a/pkg/util/set/BUILD.bazel b/pkg/util/set/BUILD.bazel
index 5e3b1d0343d1a..9c7f8d7f76f96 100644
--- a/pkg/util/set/BUILD.bazel
+++ b/pkg/util/set/BUILD.bazel
@@ -5,7 +5,6 @@ go_library(
srcs = [
"float64_set.go",
"int_set.go",
- "mem_aware_map.go",
"set.go",
"set_with_memory_usage.go",
"string_set.go",
@@ -26,7 +25,6 @@ go_test(
"float64_set_test.go",
"int_set_test.go",
"main_test.go",
- "mem_aware_map_test.go",
"set_test.go",
"set_with_memory_usage_test.go",
"string_set_test.go",
diff --git a/pkg/util/set/mem_aware_map.go b/pkg/util/set/mem_aware_map.go
deleted file mode 100644
index 0ec3c773229e5..0000000000000
--- a/pkg/util/set/mem_aware_map.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2022 PingCAP, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package set
-
-import (
- "math"
-
- "github.com/pingcap/tidb/pkg/util/hack"
-)
-
-// MemAwareMap is a map which is aware of its memory usage. It's adapted from SetWithMemoryUsage.
-// It doesn't support delete.
-// The estimate usage of memory is usually smaller than the real usage.
-// According to experiments with SetWithMemoryUsage, 2/3 * estimated usage <= real usage <= estimated usage.
-type MemAwareMap[K comparable, V any] struct {
- M map[K]V // it's public, when callers want to directly access it, e.g. use in a for-range-loop
- bInMap int64
- bucketMemoryUsage uint64
-}
-
-// EstimateMapSize returns the estimated size of the map. It doesn't include the dynamic part, e.g. objects pointed to by pointers in the map.
-// len(map) <= load_factor * 2^bInMap. bInMap = ceil(log2(len(map)/load_factor)).
-// memory = bucketSize * 2^bInMap
-func EstimateMapSize(length int, bucketSize uint64) uint64 {
- if length == 0 {
- return 0
- }
- bInMap := uint64(math.Ceil(math.Log2(float64(length) * hack.LoadFactorDen / float64(hack.LoadFactorNum))))
-	return bucketSize * uint64(1<<bInMap)
set global tidb_mem_oom_action = DEFAULT;
diff --git a/tests/integrationtest/t/executor/index_merge_reader.test b/tests/integrationtest/t/executor/index_merge_reader.test
index b91ef7fe1f13d..2ea56a38dcf9d 100644
--- a/tests/integrationtest/t/executor/index_merge_reader.test
+++ b/tests/integrationtest/t/executor/index_merge_reader.test
@@ -280,7 +280,7 @@ create table t1(pk varchar(100) primary key, c1 int, c2 int, index idx1(c1), ind
insert into t1 values('TXwuGSfZfrgVbTksgvQBilqiUXlNEXzyXNqWRTCidzXFbrkpGFJalRMdVGQOAOojditwludthMcitNqNtvirGAudSNBtdIkpJIHQ', 1, 1), ('LSiKhgTNnuyjdBtuKKuRgzrcxbHrIlfxSkEuooaPYwfOBVQfNYAyatHiWvmUWRUvBLvXmpqAJUWRXXHFCLCjuJqFbVxQdUxqRuqW', 1, 1), ('qCOSFbvtmansHENQaAQbnyYOwCTPctlejpbpueHbtzskmPOazrMWdcMLaYjyfxYQUgDDjCnAnnExepNqwYIzHVjNVndlOzFaAOcf', 1, 1), ('qBqdtPyXIqLKynGNHnRlrufuUCZPqhxUYEqIrYERnQdqXRjVWcoYclxYXoqdpQboKydzhOHOWBwtmcXzGwCWQVdbpozvIaXxiBQj', 1, 1), ('TXZlGHnXOiSWGyRafAqworFmxuadHRTHcYyzLqZMzIMGUUBQmgiIJKQOqbHhoPEKbYBgfPDZJwwqgnCbMxZKaZfvGyVRRUOgRhoq', 1, 1), ('SwFEtKDfPDQpsyxTdTruPyNDLvEOLRdQtSttxJmgBuZiVKsflHCDZaGvkLHMqhHqLayfbZFrxUHzWHgfoPFCWCdCHScabWRNCHCL', 1, 1), ('BuZhnsTMGNtMJtrjjdMMrguutSpiLnZNCdgiNkWDPymzIymcujjBtsnKCAVRSErvbzPaOwLTTPWkGmbXltqOJXmkXnSWWlWaaBqe', 1, 1), ('LuJCpJrLUwDJutwBDtGEsGduteBWPHeGLVhmVJYVrmjunKNuplEeWDCMIAxHPoiRmdPnXneQEQWRvJkPBoXOPaGZhhFLFgGraLmH', 1, 1), ('JKJwMlPmymduJWOmKLFBmZyCFrcUvKcGQkzJmzGjuFoZweyCBptswEPHTkaIhWEEBMWzNBawtfYKKAugBNlxcwmpJSfuIAUSIxeG', 1, 1), ('IqQbehKwleoSUnwxrVLKSbzRqlEFfkwQtRtIfaVpEGfESyGjDJeAOWQPRYVQYvlPNPROQEraCqwQTzanPSrsnUvEXHSxcYjUJvzk', 1, 1), ('zNHmiBGCLUUEDgMAeIGuTgNJFPBtePpxcQrQlgnRlvosJfeYbhRfJdfMwXIRlXxVoOowhEvPhMQPlplzkUfjjmzdJKwGATvfDAiT', 1, 1), ('OjQvpfdsHSdZUAmGfmtQaYKYONAFHGNLeLKRYECqshxygiOzfKkqRwSYGgClqqnpHqPMZpqsjIYSalziqSfMbbtmmzxkOVgglVOh', 1, 1), ('dXXZaWDwdfhjIysLTNMSfwvoEBJhWOVpJnfXFofWSWMfMbUlRgAkobxoCxXPXNUWzAQczbQclQpvIvvATHHcQgdXUvwSTHqLXZny', 1, 1), ('haPqYVwFNUkedfIKPOPyUxIvbSkaUbsEWNvnDtXZsQQqafIhDXlajYpuXOSYiOwGJYAMVLUvXfwOIuyHKElzJHpOUdCiQiXRHubI', 1, 1), ('nQzOwSsVBjCpehVVmLeyYwyVEwYGAfkhCtkkaKyiXzYCRPRVZpNVnOXGbuWrQEgTuPEPFPApUaYLdCtyBEQulFEwyHlORrMfIJxr', 1, 1), ('ksGDopwNPvpjeCtAMaTrznDSgCLpRDQoCdsahWSjwumVEJITbNBPAAtkoxHuwmNQsryoILqCPBPiUSxAWjnFEdtxDIgEtqDiFvpO', 1, 1), ('AdWEZYzxCMhfcZseNVmNQpyqJrVKcKaZpKKcwZXfDPeIBMzkLzpJpOenidMBtBPBIbaiqfpSxBnGtRHAksBOgpigQTVomZzJhCFb', 1, 1), 
('bVMNkLOAuRHqnCtGvVPLvmVSKihYFotmHTjObiAIARHawZTruAabGpFxeYxYTTFRxteFYyBfkBfiSEIFvOPquDnlVRNUUubssEMz', 1, 1), ('kXmcjqMYzGxvHBRRCovSTWavDnVajKLlxguJgniJeNkWQUxjHjYjBsveLfojybWkbqHBVzrOoqgXFrDnnRJPcybmnuAEUTPUoIjO', 1, 1), ('rVXzZDKudOpWCBuRCoQCpukHxenZnsaptDeJCCFzxMVvNucFwLKIiSceLsqUHHQuEfAIygjQCCkIbfInGthSnoLdNGWbsLDsxnrY', 1, 1);
explain format='brief' select /*+ use_index_merge(t1, primary, idx1, idx2) */ c1 from t1 where c1 < 1024 and c2 < 1024;
set global tidb_mem_oom_action='CANCEL';
-set @@tidb_mem_quota_query = 4000;
+set @@tidb_mem_quota_query = 3000;
-- replace_regex /conn=[-0-9]+/conn=/
-- error 8175
select /*+ use_index_merge(t1, primary, idx1, idx2) */ c1 from t1 where c1 < 1024 and c2 < 1024;
diff --git a/tools/check/bazel-check-abi.sh b/tools/check/bazel-check-abi.sh
new file mode 100755
index 0000000000000..07765d614cd71
--- /dev/null
+++ b/tools/check/bazel-check-abi.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+# Copyright 2025 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+# Locate the Bazel-managed Go toolchain and pin the checksums of the
+# internal runtime files whose layout TiDB relies on; fail loudly if a
+# Go upgrade changes any of them.
+GOROOT=$(bazel run @io_bazel_rules_go//go -- env GOROOT)
+cd "${GOROOT}"
+
+gosrc_md5=()
+gosrc_md5+=("src/internal/runtime/maps/map.go a29531cd3447fd3c90ceabfde5a08921")
+gosrc_md5+=("src/internal/runtime/maps/table.go 1ff4f281722eb83ac7d64ae0453e9718")
+gosrc_md5+=("src/internal/abi/map_swiss.go 7ef614406774c5be839e63aea0225b00")
+gosrc_md5+=("src/internal/abi/type.go d0caafb471a5b971854ca6426510608c")
+
+for x in "${gosrc_md5[@]}"; do
+  # Split "path checksum" into two fields (word splitting intended).
+  x=($x)
+  src="${x[0]}"
+  md5="${x[1]}"
+  echo "Checking ${src}"
+  if [ "$(md5sum "${src}" | cut -d' ' -f1)" != "${md5}" ]; then
+    echo "Unexpected checksum for ${src}"
+    exit 1
+  fi
+done