diff --git a/Makefile b/Makefile index b4594b5fd9a7..d9be018f969d 100644 --- a/Makefile +++ b/Makefile @@ -795,8 +795,8 @@ SQLPARSER_TARGETS = \ PROTOBUF_TARGETS := bin/.go_protobuf_sources bin/.gw_protobuf_sources -SWAGGER_TARGETS := \ - docs/generated/swagger/spec.json +SWAGGER_TARGETS := + #docs/generated/swagger/spec.json DOCGEN_TARGETS := \ bin/.docgen_bnfs \ diff --git a/WORKSPACE b/WORKSPACE index c5f809a21129..decc1ec6f30d 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -18,12 +18,12 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # Load go bazel tools. This gives us access to the go bazel SDK/toolchains. http_archive( name = "io_bazel_rules_go", - sha256 = "72f7456307988b1ee4f2d32bae8ac88b50c857b126b164f667f94427d85fb705", - strip_prefix = "cockroachdb-rules_go-5a4682c", + sha256 = "12f619147ab74d6d50741652ba1cdd48707d80ffd13a2cc79fb630265ff03de5", + strip_prefix = "cockroachdb-rules_go-cb54c66", urls = [ - # cockroachdb/rules_go as of 5a4682cd1eda7e7308107f3ff4adb981a81a953c - # (upstream release-0.33 plus a few patches). - "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_go-v0.27.0-130-g5a4682c.tar.gz", + # cockroachdb/rules_go as of cb54c66bc3e23a9533a2d04459f6262a6abb8fad + # (upstream release-0.34 plus a few patches). + "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_go-v0.27.0-164-gcb54c66.tar.gz", ], ) @@ -141,11 +141,14 @@ http_archive( "@io_bazel_rules_go//third_party:go_googleapis-gazelle.patch", "@com_github_cockroachdb_cockroach//build/patches:go_googleapis.patch", ], - sha256 = "9181bb36a1df4f397375ec5aa480db797b882073518801e3a20b0e46418f2f90", - strip_prefix = "googleapis-530ca55953b470ab3b37dc9de37fcfa59410b741", - # master, as of 2022-06-05 + sha256 = "73831cbb41f2750f3181d126bbabcd3e58b5188e131ecbc309793fa54d5439c9", + strip_prefix = "googleapis-53377c165584e84c410a0905d9effb3fe5df2806", + # master, as of 2022-07-19 + # NB: You may have to update this when bumping rules_go. Bumping to the same + # version in rules_go (go/private/repositories.bzl) is probably what you + # want to do. 
urls = [ - "https://storage.googleapis.com/public-bazel-artifacts/bazel/530ca55953b470ab3b37dc9de37fcfa59410b741.zip", + "https://storage.googleapis.com/public-bazel-artifacts/bazel/53377c165584e84c410a0905d9effb3fe5df2806.zip", ], ) @@ -166,15 +169,15 @@ load( go_download_sdk( name = "go_sdk", sdks = { - "darwin_amd64": ("go1.18.4.darwin-amd64.tar.gz", "5202f77d94b1bf1ff9ffdae2a663e4cda123d6f9a3851340291ae741e25672f9"), - "darwin_arm64": ("go1.18.4.darwin-arm64.tar.gz", "04eed623d5143ffa44965b618b509e0beccccfd3a4a1bfebc0cdbcf906046769"), - "freebsd_amd64": ("go1.18.4.freebsd-amd64.tar.gz", "fb00f8aaffcc80e0a2bd39db1d8e8e21ef0a691c564f7b7601383dd6adad4042"), - "linux_amd64": ("go1.18.4.linux-amd64.tar.gz", "87fdb4133707d90c1e8f303348302f2db61dc4f2478bcfcdf7c7de26ae14188b"), - "linux_arm64": ("go1.18.4.linux-arm64.tar.gz", "19a7c55563e9c09c916f5ddd350779ceeaba7178070b716f623f736780fae09e"), - "windows_amd64": ("go1.18.4.windows-amd64.tar.gz", "65cf73c972ab1a1d9b29eaa0d791f557677085c57e7f8901072ad9df13885501"), + "darwin_amd64": ("go1.19.1.darwin-amd64.tar.gz", "96a164130f532c0ed65e437aaf9cc66b518f0b887d5830b2dc01ebfee9d58f52"), + "darwin_arm64": ("go1.19.1.darwin-arm64.tar.gz", "e46aecce83a9289be16ce4ba9b8478a5b89b8aa0230171d5c6adbc0c66640548"), + "freebsd_amd64": ("go1.19.1.freebsd-amd64.tar.gz", "db5b8f232e12c655cc6cde6af1adf4d27d842541807802d747c86161e89efa0a"), + "linux_amd64": ("go1.19.1.linux-amd64.tar.gz", "b8c00cd587c49beef8943887d52d77aeda66a30e94effbc1e6d39e1c80f01d37"), + "linux_arm64": ("go1.19.1.linux-arm64.tar.gz", "49d7c2badb24de8dd75e6c709d4f26d0b5e9509da2fa8c9d79929952b2607c55"), + "windows_amd64": ("go1.19.1.windows-amd64.tar.gz", "a507d42a457175a50695cf5df8efc64309dec5aa2ebf28d8d28bcd8317d6350c"), }, - urls = ["https://storage.googleapis.com/public-bazel-artifacts/go/20220727-185937/{}"], - version = "1.18.4", + urls = ["https://storage.googleapis.com/public-bazel-artifacts/go/20220907-175858/{}"], + version = "1.19.1", ) # To point to a local SDK path, use the following instead. We'll call the diff --git a/build/bazelutil/check.sh b/build/bazelutil/check.sh index c462474e1eed..55e15539696c 100755 --- a/build/bazelutil/check.sh +++ b/build/bazelutil/check.sh @@ -28,7 +28,7 @@ pkg/security/certmgr/cert.go://go:generate mockgen -package=certmgr -destination pkg/security/securitytest/securitytest.go://go:generate go-bindata -mode 0600 -modtime 1400000000 -pkg securitytest -o embedded.go -ignore README.md -ignore regenerate.sh test_certs pkg/security/securitytest/securitytest.go://go:generate gofmt -s -w embedded.go pkg/security/securitytest/securitytest.go://go:generate goimports -w embedded.go -pkg/server/api_v2.go://go:generate swagger generate spec -w . -o ../../docs/generated/swagger/spec.json --scan-models +pkg/server/api_v2.go://-go:generate swagger generate spec -w . 
-o ../../docs/generated/swagger/spec.json --scan-models pkg/spanconfig/spanconfigstore/span_store.go://go:generate ../../util/interval/generic/gen.sh *entry spanconfigstore pkg/sql/conn_fsm.go://go:generate ../util/fsm/gen/reports.sh TxnStateTransitions stateNoTxn pkg/sql/opt/optgen/lang/gen.go://go:generate langgen -out expr.og.go exprs lang.opt @@ -40,7 +40,7 @@ pkg/sql/schemachanger/scop/validation.go://go:generate go run ./generate_visitor pkg/sql/schemachanger/scpb/state.go://go:generate go run element_generator.go --in elements.proto --out elements_generated.go pkg/sql/schemachanger/scpb/state.go://go:generate go run element_uml_generator.go --out uml/table.puml pkg/sql/sem/tree/eval.go://go:generate go run ./evalgen *.go -pkg/util/interval/generic/doc.go: //go:generate ../../util/interval/generic/gen.sh *latch spanlatch +pkg/util/interval/generic/doc.go: //go:generate ../../util/interval/generic/gen.sh *latch spanlatch pkg/util/interval/generic/example_t.go://go:generate ./gen.sh *example generic pkg/util/log/channels.go://go:generate go run gen/main.go logpb/log.proto channel.go channel/channel_generated.go pkg/util/log/channels.go://go:generate go run gen/main.go logpb/log.proto log_channels.go log_channels_generated.go diff --git a/build/bazelutil/distdir_files.bzl b/build/bazelutil/distdir_files.bzl index d584d1ac152a..5ce90300a90a 100644 --- a/build/bazelutil/distdir_files.bzl +++ b/build/bazelutil/distdir_files.bzl @@ -964,7 +964,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/sigs.k8s.io/structured-merge-diff/v4/io_k8s_sigs_structured_merge_diff_v4-v4.1.2.zip": "b32af97dadd79179a8f62aaf4ef1e0562e051be77053a60c7a4e724a5cbd00ce", "https://storage.googleapis.com/cockroach-godeps/gomod/sigs.k8s.io/yaml/io_k8s_sigs_yaml-v1.2.0.zip": "55ed08c5df448a033bf7e2c2912d4daa85b856a05c854b0c87ccc85c7f3fbfc7", "https://storage.googleapis.com/cockroach-godeps/gomod/sourcegraph.com/sourcegraph/appdash/com_sourcegraph_sourcegraph_appdash-v0.0.0-20190731080439-ebfcffb1b5c0.zip": "bd2492d9db05362c2fecd0b3d0f6002c89a6d90d678fb93b4158298ab883736f", - "https://storage.googleapis.com/public-bazel-artifacts/bazel/530ca55953b470ab3b37dc9de37fcfa59410b741.zip": "9181bb36a1df4f397375ec5aa480db797b882073518801e3a20b0e46418f2f90", + "https://storage.googleapis.com/public-bazel-artifacts/bazel/53377c165584e84c410a0905d9effb3fe5df2806.zip": "73831cbb41f2750f3181d126bbabcd3e58b5188e131ecbc309793fa54d5439c9", "https://storage.googleapis.com/public-bazel-artifacts/bazel/88ef31b429631b787ceb5e4556d773b20ad797c8.zip": "92a89a2bbe6c6db2a8b87da4ce723aff6253656e8417f37e50d362817c39b98b", "https://storage.googleapis.com/public-bazel-artifacts/bazel/bazel-gazelle-v0.25.0.tar.gz": "5982e5463f171da99e3bdaeff8c0f48283a7a5f396ec5282910b9e8a49c0dd7e", "https://storage.googleapis.com/public-bazel-artifacts/bazel/bazel-skylib-1.0.3.tar.gz": "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", @@ -972,7 +972,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/public-bazel-artifacts/bazel/bmatcuk-doublestar-v4.0.1-0-gf7a8118.tar.gz": "d11c3b3a45574f89d6a6b2f50e53feea50df60407b35f36193bf5815d32c79d1", "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-protobuf-3f5d91f.tar.gz": "6d4e7fe1cbd958dee69ce9becbf8892d567f082b6782d3973a118d0aa00807a8", "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_foreign_cc-6f7f1b1.tar.gz": "272ac2cde4efd316c8d7c0140dee411c89da104466701ac179286ef5a89c7b58", - 
"https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_go-v0.27.0-130-g5a4682c.tar.gz": "72f7456307988b1ee4f2d32bae8ac88b50c857b126b164f667f94427d85fb705", + "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_go-v0.27.0-164-gcb54c66.tar.gz": "12f619147ab74d6d50741652ba1cdd48707d80ffd13a2cc79fb630265ff03de5", "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_nodejs-5.5.0-1-g59a92cc.tar.gz": "7f3f747db3f924547b9ffdf86da6c604335ad95e09d4e5a69fdcfdb505099421", "https://storage.googleapis.com/public-bazel-artifacts/bazel/google-starlark-go-e043a3d.tar.gz": "a35c6468e0e0921833a63290161ff903295eaaf5915200bbce272cbc8dfd1c1c", "https://storage.googleapis.com/public-bazel-artifacts/bazel/platforms-0.0.4.tar.gz": "079945598e4b6cc075846f7fd6a9d0857c33a7afc0de868c2ccb96405225135d", @@ -999,12 +999,12 @@ DISTDIR_FILES = { "https://storage.googleapis.com/public-bazel-artifacts/c-deps/20220708-170245/libproj_foreign.macos.20220708-170245.tar.gz": "fd342ce3e99d9df6de8fcdf09ff9735887d7025d88ba9814b4c73cff24691b26", "https://storage.googleapis.com/public-bazel-artifacts/c-deps/20220708-170245/libproj_foreign.macosarm.20220708-170245.tar.gz": "6394f40dbc799909ee239e42c25d08b5b2af0ad0c8aa30f37553e936f1c1dc4e", "https://storage.googleapis.com/public-bazel-artifacts/c-deps/20220708-170245/libproj_foreign.windows.20220708-170245.tar.gz": "233c6cecef5e826bd1aea7c7c603fb86fc78299d2016c4d3afcb0c1509eff001", - "https://storage.googleapis.com/public-bazel-artifacts/go/20220727-185937/go1.18.4.darwin-amd64.tar.gz": "5202f77d94b1bf1ff9ffdae2a663e4cda123d6f9a3851340291ae741e25672f9", - "https://storage.googleapis.com/public-bazel-artifacts/go/20220727-185937/go1.18.4.darwin-arm64.tar.gz": "04eed623d5143ffa44965b618b509e0beccccfd3a4a1bfebc0cdbcf906046769", - "https://storage.googleapis.com/public-bazel-artifacts/go/20220727-185937/go1.18.4.freebsd-amd64.tar.gz": "fb00f8aaffcc80e0a2bd39db1d8e8e21ef0a691c564f7b7601383dd6adad4042", - "https://storage.googleapis.com/public-bazel-artifacts/go/20220727-185937/go1.18.4.linux-amd64.tar.gz": "87fdb4133707d90c1e8f303348302f2db61dc4f2478bcfcdf7c7de26ae14188b", - "https://storage.googleapis.com/public-bazel-artifacts/go/20220727-185937/go1.18.4.linux-arm64.tar.gz": "19a7c55563e9c09c916f5ddd350779ceeaba7178070b716f623f736780fae09e", - "https://storage.googleapis.com/public-bazel-artifacts/go/20220727-185937/go1.18.4.windows-amd64.tar.gz": "65cf73c972ab1a1d9b29eaa0d791f557677085c57e7f8901072ad9df13885501", + "https://storage.googleapis.com/public-bazel-artifacts/go/20220907-175858/go1.19.1.darwin-amd64.tar.gz": "96a164130f532c0ed65e437aaf9cc66b518f0b887d5830b2dc01ebfee9d58f52", + "https://storage.googleapis.com/public-bazel-artifacts/go/20220907-175858/go1.19.1.darwin-arm64.tar.gz": "e46aecce83a9289be16ce4ba9b8478a5b89b8aa0230171d5c6adbc0c66640548", + "https://storage.googleapis.com/public-bazel-artifacts/go/20220907-175858/go1.19.1.freebsd-amd64.tar.gz": "db5b8f232e12c655cc6cde6af1adf4d27d842541807802d747c86161e89efa0a", + "https://storage.googleapis.com/public-bazel-artifacts/go/20220907-175858/go1.19.1.linux-amd64.tar.gz": "b8c00cd587c49beef8943887d52d77aeda66a30e94effbc1e6d39e1c80f01d37", + "https://storage.googleapis.com/public-bazel-artifacts/go/20220907-175858/go1.19.1.linux-arm64.tar.gz": "49d7c2badb24de8dd75e6c709d4f26d0b5e9509da2fa8c9d79929952b2607c55", + "https://storage.googleapis.com/public-bazel-artifacts/go/20220907-175858/go1.19.1.windows-amd64.tar.gz": 
"a507d42a457175a50695cf5df8efc64309dec5aa2ebf28d8d28bcd8317d6350c", "https://storage.googleapis.com/public-bazel-artifacts/gomod/github.com/bazelbuild/buildtools/v0.0.0-20200718160251-b1667ff58f71/buildtools-v0.0.0-20200718160251-b1667ff58f71.tar.gz": "a9ef5103739dfb5ed2a5b47ab1654842a89695812e4af09e57d7015a5caf97e0", "https://storage.googleapis.com/public-bazel-artifacts/java/railroad/rr-1.63-java8.zip": "d2791cd7a44ea5be862f33f5a9b3d40aaad9858455828ebade7007ad7113fb41", "https://storage.googleapis.com/public-bazel-artifacts/js/node/v16.13.0/node-v16.13.0-darwin-arm64.tar.gz": "46d83fc0bd971db5050ef1b15afc44a6665dee40bd6c1cbaec23e1b40fa49e6d", diff --git a/build/bazelutil/unused_checker/unused_checker.go b/build/bazelutil/unused_checker/unused_checker.go index af2025506e56..2783055941e2 100644 --- a/build/bazelutil/unused_checker/unused_checker.go +++ b/build/bazelutil/unused_checker/unused_checker.go @@ -70,8 +70,9 @@ func impl() error { nogoX = filepath.Join(cwd, nogoX) cmd := exec.Command(gobin, "tool", "pack", "x", nogoX, "unused.out") cmd.Dir = tmpdir - if output, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("%w (got output %s while processing file %s)", err, string(output), nogoX) + if err := cmd.Run(); err != nil { + // The unused.out file might be missing -- this is fine. + continue } encoded, err := os.ReadFile(filepath.Join(tmpdir, "unused.out")) if err != nil { diff --git a/build/bootstrap/bootstrap-debian.sh b/build/bootstrap/bootstrap-debian.sh index ddcc9f28f1ef..af5b2cb86fa8 100755 --- a/build/bootstrap/bootstrap-debian.sh +++ b/build/bootstrap/bootstrap-debian.sh @@ -46,9 +46,9 @@ sudo tar -C /usr --strip-components=1 -zxf /tmp/cmake.tgz && rm /tmp/cmake.tgz # Install Go. trap 'rm -f /tmp/go.tgz' EXIT -curl -fsSL https://dl.google.com/go/go1.18.4.linux-amd64.tar.gz > /tmp/go.tgz +curl -fsSL https://dl.google.com/go/go1.19.1.linux-amd64.tar.gz > /tmp/go.tgz sha256sum -c - < /tmp/go.tgz + curl -fsSL https://dl.google.com/go/go1.19.1.linux-amd64.tar.gz > /tmp/go.tgz sha256sum -c - < 0 { // Flush assist credit to the global pool. This gives -diff --git c/src/runtime/runtime2.go i/src/runtime/runtime2.go -index b40045e4a5..1196c41136 100644 ---- c/src/runtime/runtime2.go -+++ i/src/runtime/runtime2.go -@@ -472,7 +472,6 @@ type g struct { +diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go +index e1788223e7..5ed1fb40ba 100644 +--- a/src/runtime/runtime2.go ++++ b/src/runtime/runtime2.go +@@ -474,7 +474,6 @@ type g struct { traceseq uint64 // trace event sequencer tracelastp puintptr // last P emitted an event for this goroutine lockedm muintptr @@ -73,7 +74,7 @@ index b40045e4a5..1196c41136 100644 writebuf []byte sigcode0 uintptr sigcode1 uintptr -@@ -486,6 +485,9 @@ type g struct { +@@ -488,6 +487,9 @@ type g struct { labels unsafe.Pointer // profiler labels timer *timer // cached timer for time.Sleep selectDone uint32 // are we participating in a select and did someone win the race? 
@@ -81,18 +82,18 @@ index b40045e4a5..1196c41136 100644 + lastsched int64 // timestamp when the G last started running + runningnanos int64 // wall time spent in the running state - // Per-G GC state - -diff --git c/src/runtime/sizeof_test.go i/src/runtime/sizeof_test.go -index ebf544ad3b..1e1cf1be9c 100644 ---- c/src/runtime/sizeof_test.go -+++ i/src/runtime/sizeof_test.go + // goroutineProfiled indicates the status of this goroutine's stack for the + // current in-progress goroutine profile +diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go +index 9ce0a3afcd..bfb5d6e33e 100644 +--- a/src/runtime/sizeof_test.go ++++ b/src/runtime/sizeof_test.go @@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ -- {runtime.G{}, 236, 392}, // g, but exported for testing -+ {runtime.G{}, 244, 400}, // g, but exported for testing +- {runtime.G{}, 240, 392}, // g, but exported for testing ++ {runtime.G{}, 248, 400}, // g, but exported for testing {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing } - + diff --git a/build/teamcity/internal/release/build-and-publish-patched-go/impl.sh b/build/teamcity/internal/release/build-and-publish-patched-go/impl.sh index ab020cd5f1cb..f69f9467f646 100755 --- a/build/teamcity/internal/release/build-and-publish-patched-go/impl.sh +++ b/build/teamcity/internal/release/build-and-publish-patched-go/impl.sh @@ -3,15 +3,15 @@ set -xeuo pipefail # When updating to a new Go version, update all of these variables. -GOVERS=1.18.4 +GOVERS=1.19.1 GOLINK=https://go.dev/dl/go$GOVERS.src.tar.gz -SRCSHASUM=4525aa6b0e3cecb57845f4060a7075aafc9ab752bb7b6b4cf8a212d43078e1e4 +SRCSHASUM=27871baa490f3401414ad793fba49086f6c855b1c584385ed7771e1204c7e179 # We mirror the upstream freebsd because we don't have a cross-compiler targeting it. GOFREEBSDLINK=https://go.dev/dl/go$GOVERS.freebsd-amd64.tar.gz -FREEBSDSHASUM=fb00f8aaffcc80e0a2bd39db1d8e8e21ef0a691c564f7b7601383dd6adad4042 +FREEBSDSHASUM=db5b8f232e12c655cc6cde6af1adf4d27d842541807802d747c86161e89efa0a # We mirror the upstream darwin/arm64 binary because we don't have code-signing yet. GODARWINARMLINK=https://go.dev/dl/go$GOVERS.darwin-arm64.tar.gz -DARWINARMSHASUM=04eed623d5143ffa44965b618b509e0beccccfd3a4a1bfebc0cdbcf906046769 +DARWINARMSHASUM=e46aecce83a9289be16ce4ba9b8478a5b89b8aa0230171d5c6adbc0c66640548 apt-get update DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ diff --git a/docs/generated/bazel_targets.txt b/docs/generated/bazel_targets.txt index 19211b34f0bc..d319ec22e982 100644 --- a/docs/generated/bazel_targets.txt +++ b/docs/generated/bazel_targets.txt @@ -10,4 +10,4 @@ documentation. Lines not beginning with // should be ignored. //docs/generated/settings:settings_for_tenants //docs/generated/sql //docs/generated/sql/bnf -//docs/generated/swagger +TODO(ricky) re-enable this //docs/generated/swagger diff --git a/docs/generated/logging.md b/docs/generated/logging.md index 3996690ce78c..80faa37b3210 100644 --- a/docs/generated/logging.md +++ b/docs/generated/logging.md @@ -44,25 +44,25 @@ See [Configure logs](configure-logs.html#dev-channel). 
The `OPS` channel is used to report "point" operational events, initiated by user operators or automation: -- Operator or system actions on server processes: process starts, - stops, shutdowns, crashes (if they can be logged), - including each time: command-line parameters, current version being run -- Actions that impact the topology of a cluster: node additions, - removals, decommissions, etc. -- Job-related initiation or termination -- [Cluster setting](cluster-settings.html) changes -- [Zone configuration](configure-replication-zones.html) changes + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes ### `HEALTH` The `HEALTH` channel is used to report "background" operational events, initiated by CockroachDB or reporting on automatic processes: -- Current resource usage, including critical resource usage -- Node-node connection events, including connection errors and - gossip details -- Range and table leasing events -- Up- and down-replication, range unavailability + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability ### `STORAGE` @@ -75,9 +75,9 @@ The `SESSIONS` channel is used to report client network activity when enabled vi the `server.auth_log.sql_connections.enabled` and/or `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): -- Connections opened/closed -- Authentication events: logins, failed attempts -- Session and query cancellation + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation This is typically configured in "audit" mode, with event numbering and synchronous writes. @@ -91,9 +91,9 @@ zone configuration changes (which go to the `OPS` channel). This includes: -- Database/schema/table/sequence/view/type creation -- Adding/removing/changing table columns -- Changing sequence parameters + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters `SQL_SCHEMA` events generally comprise changes to the schema that affect the functional behavior of client apps using stored objects. @@ -103,10 +103,10 @@ functional behavior of client apps using stored objects. The `USER_ADMIN` channel is used to report changes in users and roles, including: -- Users added/dropped -- Changes to authentication credentials (e.g., passwords, validity, etc.) -- Role grants/revocations -- Role option grants/revocations + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations This is typically configured in "audit" mode, with event numbering and synchronous writes. @@ -116,8 +116,8 @@ numbering and synchronous writes. The `PRIVILEGES` channel is used to report data authorization changes, including: -- Privilege grants/revocations on database, objects, etc. -- Object ownership changes + - Privilege grants/revocations on database, objects, etc. 
+ - Object ownership changes This is typically configured in "audit" mode, with event numbering and synchronous writes. @@ -127,10 +127,10 @@ numbering and synchronous writes. The `SENSITIVE_ACCESS` channel is used to report SQL data access to sensitive data: -- Data access audit events (when table audit is enabled via - [EXPERIMENTAL_AUDIT](experimental-audit.html)) -- SQL statements executed by users with the admin role -- Operations that write to system tables + - Data access audit events (when table audit is enabled via + [EXPERIMENTAL_AUDIT](experimental-audit.html)) + - SQL statements executed by users with the admin role + - Operations that write to system tables This is typically configured in "audit" mode, with event numbering and synchronous writes. @@ -140,9 +140,9 @@ numbering and synchronous writes. The `SQL_EXEC` channel is used to report SQL execution on behalf of client connections: -- Logical SQL statement executions (when enabled via the - `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -- uncaught Go panic errors during the execution of a SQL statement. + - Logical SQL statement executions (when enabled via the + `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. ### `SQL_PERF` diff --git a/docs/generated/logsinks.md b/docs/generated/logsinks.md index 12b6111c7357..5f3165beccf5 100644 --- a/docs/generated/logsinks.md +++ b/docs/generated/logsinks.md @@ -23,10 +23,10 @@ configurable logging directory. The configuration key under the `sinks` key in the YAML configuration is `file-groups`. Example configuration: - sinks: - file-groups: # file group configurations start here - health: # defines one group called "health" - channels: HEALTH +// sinks: +// file-groups: # file group configurations start here +// health: # defines one group called "health" +// channels: HEALTH Each generated log file is prefixed by the name of the process, followed by the name of the group, separated by a hyphen. For example, @@ -44,26 +44,25 @@ the configurations set in the `file-defaults` section. For example: - file-defaults: - redactable: false # default: disable redaction markers - dir: logs - sinks: - file-groups: - health: - channels: HEALTH - # This sink has redactable set to false, - # as the setting is inherited from file-defaults - # unless overridden here. - # - # Example override: - dir: health-logs # override the default 'logs' +// file-defaults: +// redactable: false # default: disable redaction markers +// dir: logs +// sinks: +// file-groups: +// health: +// channels: HEALTH +// # This sink has redactable set to false, +// # as the setting is inherited from file-defaults +// # unless overridden here. +// # +// # Example override: +// dir: health-logs # override the default 'logs' {{site.data.alerts.callout_success}} Run `cockroach debug check-log-config` to verify the effect of defaults inheritance. {{site.data.alerts.end}} - Type-specific configuration options: | Field | Description | @@ -119,25 +118,25 @@ the logging event is dropped. The configuration key under the `sinks` key in the YAML configuration is `fluent-servers`. 
Example configuration: - sinks: - fluent-servers: # fluent configurations start here - health: # defines one sink called "health" - channels: HEALTH - address: 127.0.0.1:5170 +// sinks: +// fluent-servers: # fluent configurations start here +// health: # defines one sink called "health" +// channels: HEALTH +// address: 127.0.0.1:5170 Every new server sink configured automatically inherits the configurations set in the `fluent-defaults` section. For example: - fluent-defaults: - redactable: false # default: disable redaction markers - sinks: - fluent-servers: - health: - channels: HEALTH - # This sink has redactable set to false, - # as the setting is inherited from fluent-defaults - # unless overridden here. +// fluent-defaults: +// redactable: false # default: disable redaction markers +// sinks: +// fluent-servers: +// health: +// channels: HEALTH +// # This sink has redactable set to false, +// # as the setting is inherited from fluent-defaults +// # unless overridden here. The default output format for Fluent sinks is `json-fluent-compact`. The `fluent` variants of the JSON formats @@ -149,7 +148,6 @@ Run `cockroach debug check-log-config` to verify the effect of defaults inherita {{site.data.alerts.end}} - Type-specific configuration options: | Field | Description | @@ -184,25 +182,25 @@ as requests to an HTTP server. The configuration key under the `sinks` key in the YAML configuration is `http-servers`. Example configuration: - sinks: - http-servers: - health: - channels: HEALTH - address: http://127.0.0.1 +// sinks: +// http-servers: +// health: +// channels: HEALTH +// address: http://127.0.0.1 Every new server sink configured automatically inherits the configuration set in the `http-defaults` section. For example: - http-defaults: - redactable: false # default: disable redaction markers - sinks: - http-servers: - health: - channels: HEALTH - # This sink has redactable set to false, - # as the setting is inherited from fluent-defaults - # unless overridden here. +// http-defaults: +// redactable: false # default: disable redaction markers +// sinks: +// http-servers: +// health: +// channels: HEALTH +// # This sink has redactable set to false, +// # as the setting is inherited from fluent-defaults +// # unless overridden here. The default output format for HTTP sinks is `json-compact`. [Other supported formats.](log-formats.html) @@ -212,7 +210,6 @@ Run `cockroach debug check-log-config` to verify the effect of defaults inherita {{site.data.alerts.end}} - Type-specific configuration options: | Field | Description | @@ -250,9 +247,9 @@ process. The configuration key under the `sinks` key in the YAML configuration is `stderr`. Example configuration: - sinks: - stderr: # standard error sink configuration starts here - channels: DEV +// sinks: +// stderr: # standard error sink configuration starts here +// channels: DEV {{site.data.alerts.callout_info}} The server start-up messages are still emitted at the start of the standard error @@ -275,7 +272,6 @@ when `capture-stray-errors` is disabled, since the standard error stream can the contain an arbitrary interleaving of non-formatted error data. - Type-specific configuration options: | Field | Description | @@ -384,21 +380,21 @@ etc. Buffering may be configured with the following fields. It may also be explicitly set to "NONE" to disable buffering. 
Example configuration: - file-defaults: - dir: logs - buffering: - max-staleness: 20s - flush-trigger-size: 25KB - max-buffer-size: 10MB - sinks: - file-groups: - health: - channels: HEALTH - buffering: - max-staleness: 5s # Override max-staleness for this sink. - ops: - channels: OPS - buffering: NONE # Disable buffering for this sink. +// file-defaults: +// dir: logs +// buffering: +// max-staleness: 20s +// flush-trigger-size: 25KB +// max-buffer-size: 10MB +// sinks: +// file-groups: +// health: +// channels: HEALTH +// buffering: +// max-staleness: 5s # Override max-staleness for this sink. +// ops: +// channels: OPS +// buffering: NONE # Disable buffering for this sink. | Field | Description | diff --git a/docs/generated/swagger/BUILD.bazel b/docs/generated/swagger/BUILD.bazel index a69f1d3eed4c..a8ee4ddc067e 100644 --- a/docs/generated/swagger/BUILD.bazel +++ b/docs/generated/swagger/BUILD.bazel @@ -1,23 +1,24 @@ load("@io_bazel_rules_go//go:def.bzl", "go_path") -genrule( - name = "swagger", - srcs = [ - ":swagger_go_path", - ], - outs = ["spec.json"], - cmd = """ - GO_REL_PATH=`dirname $(location @go_sdk//:bin/go)` - GO_ABS_PATH=`cd $$GO_REL_PATH && pwd` - env PATH=$$GO_ABS_PATH HOME=$(GENDIR) GOPATH=$$(cd $(location :swagger_go_path) && pwd) GO111MODULE=off \ - $(location @com_github_go_swagger_go_swagger//cmd/swagger) generate spec -w $(location :swagger_go_path)/src/github.com/cockroachdb/cockroach/pkg --scan-models -t bazel -o $@ - """, - exec_tools = [ - "@com_github_go_swagger_go_swagger//cmd/swagger", - "@go_sdk//:bin/go", - ], - visibility = ["//visibility:public"], -) +# TODO(ricky): re-enable this when go-swagger/go-swagger#2759 is resolved. +#genrule( +# name = "swagger", +# srcs = [ +# ":swagger_go_path", +# ], +# outs = ["spec.json"], +# cmd = """ +# GO_REL_PATH=`dirname $(location @go_sdk//:bin/go)` +# GO_ABS_PATH=`cd $$GO_REL_PATH && pwd` +# env PATH=$$GO_ABS_PATH HOME=$(GENDIR) GOPATH=$$(cd $(location :swagger_go_path) && pwd) GO111MODULE=off \ +# $(location @com_github_go_swagger_go_swagger//cmd/swagger) generate spec -w $(location :swagger_go_path)/src/github.com/cockroachdb/cockroach/pkg --scan-models -t bazel -o $@ +# """, +# exec_tools = [ +# "@com_github_go_swagger_go_swagger//cmd/swagger", +# "@go_sdk//:bin/go", +# ], +# visibility = ["//visibility:public"], +#) go_path( name = "swagger_go_path", diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index 1cb3bae0d43f..62492b506c54 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -4,6 +4,7 @@ load("//build/bazelutil/unused_checker:unused.bzl", "unused_checker") ALL_TESTS = [ + "//pkg/acceptance:acceptance_test", "//pkg/base:base_test", "//pkg/bench/rttanalysis:rttanalysis_test", "//pkg/bench/tpcc:tpcc_test", @@ -135,6 +136,8 @@ ALL_TESTS = [ "//pkg/col/coldata:coldata_test", "//pkg/col/coldataext:coldataext_test", "//pkg/col/colserde:colserde_test", + "//pkg/compose/compare/compare:compare_test", + "//pkg/compose:compose_test", "//pkg/config/zonepb:zonepb_test", "//pkg/config:config_disallowed_imports_test", "//pkg/config:config_test", @@ -494,6 +497,7 @@ ALL_TESTS = [ "//pkg/storage/fs:fs_test", "//pkg/storage/metamorphic:metamorphic_test", "//pkg/storage:storage_test", + "//pkg/testutils/docker:docker_test", "//pkg/testutils/floatcmp:floatcmp_test", "//pkg/testutils/keysutils:keysutils_test", "//pkg/testutils/lint/passes/errwrap:errwrap_test", @@ -508,6 +512,7 @@ ALL_TESTS = [ "//pkg/testutils/lint/passes/returnerrcheck:returnerrcheck_test", "//pkg/testutils/lint/passes/timer:timer_test", 
"//pkg/testutils/lint/passes/unconvert:unconvert_test", + "//pkg/testutils/lint:lint_test", "//pkg/testutils/sqlutils:sqlutils_test", "//pkg/testutils/testcluster:testcluster_test", "//pkg/testutils/zerofields:zerofields_test", diff --git a/pkg/acceptance/BUILD.bazel b/pkg/acceptance/BUILD.bazel index 87de9b6e70cf..4b093d3e23fb 100644 --- a/pkg/acceptance/BUILD.bazel +++ b/pkg/acceptance/BUILD.bazel @@ -42,6 +42,7 @@ go_test( "debug_remote_test.go", "main_test.go", ], + args = ["-test.timeout=3595s"], data = glob([ "testdata/**", "compose/**", diff --git a/pkg/acceptance/cluster/dockercluster.go b/pkg/acceptance/cluster/dockercluster.go index d88a3fa3757c..d8eb981ec413 100644 --- a/pkg/acceptance/cluster/dockercluster.go +++ b/pkg/acceptance/cluster/dockercluster.go @@ -18,7 +18,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "net" "net/url" "os" @@ -891,7 +890,7 @@ func (l *DockerCluster) ExecCLI(ctx context.Context, i int, cmd []string) (strin // Cleanup removes the cluster's volumes directory, optionally preserving the // logs directory. func (l *DockerCluster) Cleanup(ctx context.Context, preserveLogs bool) { - volumes, err := ioutil.ReadDir(l.volumesDir) + volumes, err := os.ReadDir(l.volumesDir) if err != nil { log.Warningf(ctx, "%v", err) return diff --git a/pkg/acceptance/compose/gss/psql/Dockerfile b/pkg/acceptance/compose/gss/psql/Dockerfile index d0eec480ca3d..8d317ed19f07 100644 --- a/pkg/acceptance/compose/gss/psql/Dockerfile +++ b/pkg/acceptance/compose/gss/psql/Dockerfile @@ -1,5 +1,5 @@ # Build the test binary in a multistage build. -FROM golang:1.18 AS builder +FROM golang:1.19 AS builder WORKDIR /workspace COPY . . RUN go test -v -c -tags gss_compose -o gss.test diff --git a/pkg/base/addr_validation.go b/pkg/base/addr_validation.go index 678be9c65028..bb94091fc3a4 100644 --- a/pkg/base/addr_validation.go +++ b/pkg/base/addr_validation.go @@ -23,12 +23,12 @@ import ( // ValidateAddrs controls the address fields in the Config object // and "fills in" the blanks: -// - the host part of Addr and HTTPAddr is resolved to an IP address -// if specified (it stays blank if blank to mean "all addresses"). -// - the host part of AdvertiseAddr is filled in if blank, either -// from Addr if non-empty or os.Hostname(). It is also checked -// for resolvability. -// - non-numeric port numbers are resolved to numeric. +// - the host part of Addr and HTTPAddr is resolved to an IP address +// if specified (it stays blank if blank to mean "all addresses"). +// - the host part of AdvertiseAddr is filled in if blank, either +// from Addr if non-empty or os.Hostname(). It is also checked +// for resolvability. +// - non-numeric port numbers are resolved to numeric. // // The addresses fields must be guaranteed by the caller to either be // completely empty, or have both a host part and a port part diff --git a/pkg/base/store_spec.go b/pkg/base/store_spec.go index 5ffb6a513521..d1a28b71cf1e 100644 --- a/pkg/base/store_spec.go +++ b/pkg/base/store_spec.go @@ -352,23 +352,24 @@ var fractionRegex = regexp.MustCompile(`^([-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+|[0- // NewStoreSpec parses the string passed into a --store flag and returns a // StoreSpec if it is correctly parsed. // There are five possible fields that can be passed in, comma separated: -// - path=xxx The directory in which to the rocks db instance should be -// located, required unless using a in memory storage. -// - type=mem This specifies that the store is an in memory storage instead of -// an on disk one. 
mem is currently the only other type available. -// - size=xxx The optional maximum size of the storage. This can be in one of a -// few different formats. +// - path=xxx The directory in which to the rocks db instance should be +// located, required unless using a in memory storage. +// - type=mem This specifies that the store is an in memory storage instead of +// an on disk one. mem is currently the only other type available. +// - size=xxx The optional maximum size of the storage. This can be in one of a +// few different formats. // - 10000000000 -> 10000000000 bytes // - 20GB -> 20000000000 bytes // - 20GiB -> 21474836480 bytes // - 0.02TiB -> 21474836480 bytes // - 20% -> 20% of the available space // - 0.2 -> 20% of the available space -// - attrs=xxx:yyy:zzz A colon separated list of optional attributes. -// - provisioned-rate=disk-name=[:bandwidth=] The -// provisioned-rate can be used for admission control for operations on the -// store. The bandwidth is optional, and if unspecified, a cluster setting -// (kv.store.admission.provisioned_bandwidth) will be used. +// - attrs=xxx:yyy:zzz A colon separated list of optional attributes. +// - provisioned-rate=disk-name=[:bandwidth=] The +// provisioned-rate can be used for admission control for operations on the +// store. The bandwidth is optional, and if unspecified, a cluster setting +// (kv.store.admission.provisioned_bandwidth) will be used. +// // Note that commas are forbidden within any field name or value. func NewStoreSpec(value string) (StoreSpec, error) { const pathField = "path" diff --git a/pkg/build/util/util.go b/pkg/build/util/util.go index a743d518a372..b39d47211583 100644 --- a/pkg/build/util/util.go +++ b/pkg/build/util/util.go @@ -59,7 +59,9 @@ type xmlMessage struct { // OutputOfBinaryRule returns the path of the binary produced by the // given build target, relative to bazel-bin. That is, -// filepath.Join(bazelBin, OutputOfBinaryRule(target, isWindows)) is the absolute +// +// filepath.Join(bazelBin, OutputOfBinaryRule(target, isWindows)) is the absolute +// // path to the build binary for the target. func OutputOfBinaryRule(target string, isWindows bool) string { colon := strings.Index(target, ":") diff --git a/pkg/ccl/backupccl/backup_job.go b/pkg/ccl/backupccl/backup_job.go index 62cc97912d19..5dca580f8a0b 100644 --- a/pkg/ccl/backupccl/backup_job.go +++ b/pkg/ccl/backupccl/backup_job.go @@ -116,10 +116,10 @@ func clusterNodeCount(gw gossip.OptionalGossip) (int, error) { // backup exports a snapshot of every kv entry into ranged sstables. // // The output is an sstable per range with files in the following locations: -// - /.sst -// - is given by the user and may be cloud storage -// - Each file contains data for a key range that doesn't overlap with any other -// file. +// - /.sst +// - is given by the user and may be cloud storage +// - Each file contains data for a key range that doesn't overlap with any other +// file. 
func backup( ctx context.Context, execCtx sql.JobExecContext, diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index dde171b6f341..7e4d2cda1809 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -17,7 +17,6 @@ import ( "fmt" "hash/crc32" "io" - "io/ioutil" "math" "math/rand" "net/url" @@ -255,7 +254,7 @@ func TestBackupRestorePartitioned(t *testing.T) { hasSSTs := func(t *testing.T, location string) bool { sstMatcher := regexp.MustCompile(`\d+\.sst`) subDir := filepath.Join(locationToDir(location), "data") - files, err := ioutil.ReadDir(subDir) + files, err := os.ReadDir(subDir) if err != nil { if oserror.IsNotExist(err) { return false @@ -289,7 +288,7 @@ func TestBackupRestorePartitioned(t *testing.T) { partitionMatcher := regexp.MustCompile(`^BACKUP_PART_`) for _, location := range locations { subDir := locationToDir(location) - files, err := ioutil.ReadDir(subDir) + files, err := os.ReadDir(subDir) if err != nil { t.Fatal(err) } diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index c9ad3567361b..7ee2343ec8e2 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -679,17 +679,17 @@ func backedUpDescriptorWithInProgressImportInto( // createImportingDescriptors creates the tables that we will restore into and returns up to three // configurations for separate restoration flows. The three restoration flows are // -// 1. dataToPreRestore: a restoration flow cfg to ingest a subset of -// system tables (e.g. zone configs) during a cluster restore that are -// required to be set up before the rest of the data gets restored. -// This should be empty during non-cluster restores. +// 1. dataToPreRestore: a restoration flow cfg to ingest a subset of +// system tables (e.g. zone configs) during a cluster restore that are +// required to be set up before the rest of the data gets restored. +// This should be empty during non-cluster restores. // -// 2. preValidation: a restoration flow cfg to ingest the remainder of system tables, -// during a verify_backup_table_data, cluster level, restores. This should be empty otherwise. +// 2. preValidation: a restoration flow cfg to ingest the remainder of system tables, +// during a verify_backup_table_data, cluster level, restores. This should be empty otherwise. // -// 3. trackedRestore: a restoration flow cfg to ingest the remainder of -// restore targets. This flow should get executed last and should contain the -// bulk of the work, as it is used for job progress tracking. +// 3. trackedRestore: a restoration flow cfg to ingest the remainder of +// restore targets. This flow should get executed last and should contain the +// bulk of the work, as it is used for job progress tracking. func createImportingDescriptors( ctx context.Context, p sql.JobExecContext, diff --git a/pkg/ccl/backupccl/restore_mid_schema_change_test.go b/pkg/ccl/backupccl/restore_mid_schema_change_test.go index 31b126d6ec2c..2eb880f4a3ea 100644 --- a/pkg/ccl/backupccl/restore_mid_schema_change_test.go +++ b/pkg/ccl/backupccl/restore_mid_schema_change_test.go @@ -11,7 +11,6 @@ package backupccl import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -83,7 +82,7 @@ func TestRestoreMidSchemaChange(t *testing.T) { // after the backfill portion of the schema change. 
for _, blockLocation := range []string{"before", "after"} { t.Run(blockLocation, func(t *testing.T) { - versionDirs, err := ioutil.ReadDir(filepath.Join(exportDirs, blockLocation)) + versionDirs, err := os.ReadDir(filepath.Join(exportDirs, blockLocation)) require.NoError(t, err) for _, clusterVersionDir := range versionDirs { clusterVersion, err := parseMajorVersion(clusterVersionDir.Name()) @@ -101,7 +100,7 @@ func TestRestoreMidSchemaChange(t *testing.T) { // In each version folder (e.g. "19.2", "20.1"), there is a backup for // each schema change. - backupDirs, err := ioutil.ReadDir(fullClusterVersionDir) + backupDirs, err := os.ReadDir(fullClusterVersionDir) require.NoError(t, err) for _, backupDir := range backupDirs { diff --git a/pkg/ccl/backupccl/restore_old_sequences_test.go b/pkg/ccl/backupccl/restore_old_sequences_test.go index 3e2fb96122b8..4809c59e6033 100644 --- a/pkg/ccl/backupccl/restore_old_sequences_test.go +++ b/pkg/ccl/backupccl/restore_old_sequences_test.go @@ -9,7 +9,6 @@ package backupccl import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -29,16 +28,15 @@ import ( // // The SSTs were created via the following commands: // -// VERSION=... -// roachprod create local -// roachprod wipe localÅ -// roachprod stage local release ${VERSION} -// roachprod start local -// roachprod sql local:1 -- -e "$(cat pkg/ccl/backupccl/testdata/restore_old_sequences/create.sql)" -// roachprod sql local:1 -- -e "BACKUP DATABASE test TO 'nodelocal://1/backup'" -// # Then grab the backups and put the files into the appropriate -// # testdata directory. -// +// VERSION=... +// roachprod create local +// roachprod wipe localÅ +// roachprod stage local release ${VERSION} +// roachprod start local +// roachprod sql local:1 -- -e "$(cat pkg/ccl/backupccl/testdata/restore_old_sequences/create.sql)" +// roachprod sql local:1 -- -e "BACKUP DATABASE test TO 'nodelocal://1/backup'" +// # Then grab the backups and put the files into the appropriate +// # testdata directory. func TestRestoreOldSequences(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -48,7 +46,7 @@ func TestRestoreOldSequences(t *testing.T) { ) t.Run("sequences-restore", func(t *testing.T) { - dirs, err := ioutil.ReadDir(exportDirs) + dirs, err := os.ReadDir(exportDirs) require.NoError(t, err) for _, isSchemaOnly := range []bool{true, false} { suffix := "" diff --git a/pkg/ccl/backupccl/restore_old_versions_test.go b/pkg/ccl/backupccl/restore_old_versions_test.go index 2e8f5446b60c..051fab313491 100644 --- a/pkg/ccl/backupccl/restore_old_versions_test.go +++ b/pkg/ccl/backupccl/restore_old_versions_test.go @@ -11,7 +11,6 @@ package backupccl import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -48,18 +47,17 @@ import ( // // The SSTs were created via the following commands: // -// VERSION=... -// roachprod wipe local -// roachprod stage local release ${VERSION} -// roachprod start local -// # If the version is v1.0.7 then you need to enable enterprise with the -// # enterprise.enabled cluster setting. -// roachprod sql local:1 -- -e "$(cat pkg/ccl/backupccl/testdata/restore_old_versions/create.sql)" -// # Create an S3 bucket to store the backup. -// roachprod sql local:1 -- -e "BACKUP DATABASE test TO 's3:///${VERSION}?AWS_ACCESS_KEY_ID=<...>&AWS_SECRET_ACCESS_KEY=<...>'" -// # Then download the backup from s3 and plop the files into the appropriate -// # testdata directory. -// +// VERSION=... 
+// roachprod wipe local +// roachprod stage local release ${VERSION} +// roachprod start local +// # If the version is v1.0.7 then you need to enable enterprise with the +// # enterprise.enabled cluster setting. +// roachprod sql local:1 -- -e "$(cat pkg/ccl/backupccl/testdata/restore_old_versions/create.sql)" +// # Create an S3 bucket to store the backup. +// roachprod sql local:1 -- -e "BACKUP DATABASE test TO 's3:///${VERSION}?AWS_ACCESS_KEY_ID=<...>&AWS_SECRET_ACCESS_KEY=<...>'" +// # Then download the backup from s3 and plop the files into the appropriate +// # testdata directory. func TestRestoreOldVersions(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -77,7 +75,7 @@ func TestRestoreOldVersions(t *testing.T) { ) t.Run("table-restore", func(t *testing.T) { - dirs, err := ioutil.ReadDir(exportDirsWithoutInterleave) + dirs, err := os.ReadDir(exportDirsWithoutInterleave) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -88,7 +86,7 @@ func TestRestoreOldVersions(t *testing.T) { }) t.Run("table-restore-with-interleave", func(t *testing.T) { - dirs, err := ioutil.ReadDir(exportDirsWithoutInterleave) + dirs, err := os.ReadDir(exportDirsWithoutInterleave) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -99,7 +97,7 @@ func TestRestoreOldVersions(t *testing.T) { }) t.Run("fk-rev-restore", func(t *testing.T) { - dirs, err := ioutil.ReadDir(fkRevDirs) + dirs, err := os.ReadDir(fkRevDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -110,7 +108,7 @@ func TestRestoreOldVersions(t *testing.T) { }) t.Run("cluster-restore", func(t *testing.T) { - dirs, err := ioutil.ReadDir(clusterDirs) + dirs, err := os.ReadDir(clusterDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -129,7 +127,7 @@ func TestRestoreOldVersions(t *testing.T) { t.Run("multi-region-restore", func(t *testing.T) { skip.UnderRace(t, "very slow as it starts multiple servers") - dirs, err := ioutil.ReadDir(multiRegionDirs) + dirs, err := os.ReadDir(multiRegionDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -246,7 +244,7 @@ ORDER BY object_type, object_name`, [][]string{ }) t.Run("zoneconfig_privilege_restore", func(t *testing.T) { - dirs, err := ioutil.ReadDir(privilegeDirs) + dirs, err := os.ReadDir(privilegeDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -257,7 +255,7 @@ ORDER BY object_type, object_name`, [][]string{ }) t.Run("public_schema_remap", func(t *testing.T) { - dirs, err := ioutil.ReadDir(publicSchemaDirs) + dirs, err := os.ReadDir(publicSchemaDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -268,7 +266,7 @@ ORDER BY object_type, object_name`, [][]string{ }) t.Run("missing_public_schema_namespace_entry_cleanup_on_fail", func(t *testing.T) { - dirs, err := ioutil.ReadDir(publicSchemaDirs) + dirs, err := os.ReadDir(publicSchemaDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -279,7 +277,7 @@ ORDER BY object_type, object_name`, [][]string{ }) t.Run("system-users-restore", func(t *testing.T) { - dirs, err := ioutil.ReadDir(systemUsersDirs) + dirs, err := os.ReadDir(systemUsersDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) @@ -290,7 +288,7 @@ ORDER BY object_type, object_name`, [][]string{ }) t.Run("full-cluster-restore-users-without-ids", func(t *testing.T) { 
- dirs, err := ioutil.ReadDir(systemUsersDirs) + dirs, err := os.ReadDir(systemUsersDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) diff --git a/pkg/ccl/backupccl/restore_span_covering.go b/pkg/ccl/backupccl/restore_span_covering.go index b47cc37e2777..d65cef3d0ed0 100644 --- a/pkg/ccl/backupccl/restore_span_covering.go +++ b/pkg/ccl/backupccl/restore_span_covering.go @@ -51,20 +51,23 @@ const targetRestoreSpanSize = 384 << 20 // based on the lowWaterMark before the covering for them is generated. Consider // a chain of backups with files f1, f2… which cover spans as follows: // -// backup -// 0| a___1___c c__2__e h__3__i -// 1| b___4___d g____5___i -// 2| a___________6______________h j_7_k -// 3| h_8_i l_9_m -// keys--a---b---c---d---e---f---g---h----i---j---k---l----m------p----> +// backup +// 0| a___1___c c__2__e h__3__i +// 1| b___4___d g____5___i +// 2| a___________6______________h j_7_k +// 3| h_8_i l_9_m +// keys--a---b---c---d---e---f---g---h----i---j---k---l----m------p----> +// // spans: |-------span1-------||---span2---| |---span3---| // // The cover for those spans would look like: -// [a, c): 1, 4, 6 -// [c, e): 2, 4, 6 -// [e, f): 6 -// [f, i): 3, 5, 6, 8 -// [l, m): 9 +// +// [a, c): 1, 4, 6 +// [c, e): 2, 4, 6 +// [e, f): 6 +// [f, i): 3, 5, 6, 8 +// [l, m): 9 +// // This example is tested in TestRestoreEntryCoverExample. // // If targetSize > 0, then spans which would be added to the right-hand side of diff --git a/pkg/ccl/backupccl/show_test.go b/pkg/ccl/backupccl/show_test.go index 5ef2b65c9f74..1782618edfee 100644 --- a/pkg/ccl/backupccl/show_test.go +++ b/pkg/ccl/backupccl/show_test.go @@ -12,7 +12,6 @@ import ( "context" gosql "database/sql" "fmt" - "io/ioutil" "net/url" "os" "path/filepath" @@ -723,7 +722,7 @@ func TestShowUpgradedForeignKeys(t *testing.T) { fkRevDirs = testdataBase + "/fk-rev-history" ) - dirs, err := ioutil.ReadDir(fkRevDirs) + dirs, err := os.ReadDir(fkRevDirs) require.NoError(t, err) for _, dir := range dirs { require.True(t, dir.IsDir()) diff --git a/pkg/ccl/backupccl/system_schema.go b/pkg/ccl/backupccl/system_schema.go index 63a42affece0..2b98111361b7 100644 --- a/pkg/ccl/backupccl/system_schema.go +++ b/pkg/ccl/backupccl/system_schema.go @@ -52,13 +52,14 @@ const ( // systemBackupConfiguration holds any configuration related to backing up // system tables. System tables differ from normal tables with respect to backup // for 2 reasons: -// 1) For some tables, their contents are read during the restore, so it is non- -// trivial to restore this data without affecting the restore job itself. // -// 2) It may reference system data which could be rewritten. This is particularly -// problematic for data that references tables. At time of writing, cluster -// restore creates descriptors with the same ID as they had in the backing up -// cluster so there is no need to rewrite system table data. +// 1. For some tables, their contents are read during the restore, so it is non- +// trivial to restore this data without affecting the restore job itself. +// +// 2. It may reference system data which could be rewritten. This is particularly +// problematic for data that references tables. At time of writing, cluster +// restore creates descriptors with the same ID as they had in the backing up +// cluster so there is no need to rewrite system table data. 
type systemBackupConfiguration struct { shouldIncludeInClusterBackup clusterBackupInclusion // restoreBeforeData indicates that this system table should be fully restored diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go index 61bb2f1b9744..472e8067d05d 100644 --- a/pkg/ccl/backupccl/targets.go +++ b/pkg/ccl/backupccl/targets.go @@ -333,13 +333,13 @@ func fullClusterTargetsBackup( // filters the descriptors based on the targets specified in the restore, and // calculates the max descriptor ID in the backup. // Post filtering, the method returns: -// - A list of all descriptors (table, type, database, schema) along with their -// parent databases. -// - A list of database descriptors IFF the user is restoring on the cluster or -// database level. -// - A map of table patterns to the resolved descriptor IFF the user is -// restoring on the table leve. -// - A list of tenants to restore, if applicable. +// - A list of all descriptors (table, type, database, schema) along with their +// parent databases. +// - A list of database descriptors IFF the user is restoring on the cluster or +// database level. +// - A map of table patterns to the resolved descriptor IFF the user is +// restoring on the table leve. +// - A list of tenants to restore, if applicable. func selectTargets( ctx context.Context, p sql.PlanHookState, diff --git a/pkg/ccl/changefeedccl/avro_test.go b/pkg/ccl/changefeedccl/avro_test.go index e7a8c0cc4ff9..ed52980dd451 100644 --- a/pkg/ccl/changefeedccl/avro_test.go +++ b/pkg/ccl/changefeedccl/avro_test.go @@ -189,7 +189,7 @@ func randTime(rng *rand.Rand) time.Time { return timeutil.Unix(0, rng.Int63()) } -//Create a thin, in-memory user-defined enum type +// Create a thin, in-memory user-defined enum type func createEnum(enumLabels tree.EnumValueList, typeName tree.TypeName) *types.T { members := make([]descpb.TypeDescriptor_EnumMember, len(enumLabels)) diff --git a/pkg/ccl/changefeedccl/cdceval/validation.go b/pkg/ccl/changefeedccl/cdceval/validation.go index 9fb21d784287..0d52c4436513 100644 --- a/pkg/ccl/changefeedccl/cdceval/validation.go +++ b/pkg/ccl/changefeedccl/cdceval/validation.go @@ -29,9 +29,10 @@ import ( // expression is valid for a table and target family. includeVirtual indicates // if virtual columns should be considered valid in the expressions. // Normalization steps include: -// * Table name replaces with table reference -// * UDTs values replaced with their physical representation (to keep expression stable +// - Table name replaces with table reference +// - UDTs values replaced with their physical representation (to keep expression stable // across data type changes). +// // The normalized (updated) select clause expression can be serialized into protocol // buffer using cdceval.AsStringUnredacted. func NormalizeAndValidateSelectForTarget( diff --git a/pkg/ccl/changefeedccl/cdcevent/doc.go b/pkg/ccl/changefeedccl/cdcevent/doc.go index eb4792b408ae..62510856599d 100644 --- a/pkg/ccl/changefeedccl/cdcevent/doc.go +++ b/pkg/ccl/changefeedccl/cdcevent/doc.go @@ -7,7 +7,6 @@ // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt /* - Package cdcevent facilitates conversion from low level roachpb.KeyValue into a higher level Row. 
This package abstracts away the low level catalog objects (catalog.TableDescriptor, catalog.ColumnDescriptor, etc) so that the rest of the cdc code diff --git a/pkg/ccl/changefeedccl/changefeedbase/settings.go b/pkg/ccl/changefeedccl/changefeedbase/settings.go index 61f851c0c5b6..8d022374119a 100644 --- a/pkg/ccl/changefeedccl/changefeedbase/settings.go +++ b/pkg/ccl/changefeedccl/changefeedbase/settings.go @@ -98,8 +98,9 @@ var FrontierHighwaterLagCheckpointThreshold = settings.RegisterDurationSetting( // So, 1KB per span. We could be looking at 10MB checkpoint record. // // The default for this setting was chosen as follows: -// * Assume a very long backfill, running for 25 hours (GC TTL default duration). -// * Assume we want to have at most 150MB worth of checkpoints in the job record. +// - Assume a very long backfill, running for 25 hours (GC TTL default duration). +// - Assume we want to have at most 150MB worth of checkpoints in the job record. +// // Therefore, we should write at most 6 MB of checkpoint/hour; OR, based on the default // FrontierCheckpointFrequency setting, 1 MB per checkpoint. var FrontierCheckpointMaxBytes = settings.RegisterByteSizeSetting( diff --git a/pkg/ccl/changefeedccl/doc.go b/pkg/ccl/changefeedccl/doc.go index 9f200c62476d..71dabf3612d9 100644 --- a/pkg/ccl/changefeedccl/doc.go +++ b/pkg/ccl/changefeedccl/doc.go @@ -17,7 +17,7 @@ Changefeeds are built on top of rangefeeds, which provide a stream of KV events for a given keyspan as well as periodic "resolved timestamps" for those spans. For more information on rangefeeds see - docs/RFCS/20170613_range_feeds_storage_primitive.md + docs/RFCS/20170613_range_feeds_storage_primitive.md The changefeed machinery encodes and delivers both the KV events and resolved timestamps to the sinks. It further uses the resolved @@ -54,38 +54,37 @@ schemafeed: Periodically polls the table descriptors table. Rangefeed events are held until it is sure it knows the schema for the relevant table at the event's timestamp. 
- +-----------------+ - +------+ | | +-----+ - | sink |<------+ changeFrontier +------>| job | - +------+ | | +-----+ - +--------+--------+ - ^ - | - +-------+--------+ - +------+ | | - | sink +<-------+ changefeedAgg |<------------+ - +------+ | | | - +--+-------------+ chanBuffer - | | - v +------+------+ - +--------------+ | | - | +------>| copyFromTo +--+ - | kvfeed | | | | - | | +------+------+ | - +--------+---+-+ ^ | - | | memBuffer | - | | | | - | | +-----+------+ | +-----------+ - | | | | | | | - | +--------> |physical +----->| rangefeed | - | | feed | | | | - | +------------+ | +-----------+ - | | - | | - | +------------+ | - +------------> | schemafeed |<-| - | (polls) | - +------------+ - + +-----------------+ + +------+ | | +-----+ + | sink |<------+ changeFrontier +------>| job | + +------+ | | +-----+ + +--------+--------+ + ^ + | + +-------+--------+ + +------+ | | + | sink +<-------+ changefeedAgg |<------------+ + +------+ | | | + +--+-------------+ chanBuffer + | | + v +------+------+ + +--------------+ | | + | +------>| copyFromTo +--+ + | kvfeed | | | | + | | +------+------+ | + +--------+---+-+ ^ | + | | memBuffer | + | | | | + | | +-----+------+ | +-----------+ + | | | | | | | + | +--------> |physical +----->| rangefeed | + | | feed | | | | + | +------------+ | +-----------+ + | | + | | + | +------------+ | + +------------> | schemafeed |<-| + | (polls) | + +------------+ */ package changefeedccl diff --git a/pkg/ccl/changefeedccl/schema_registry.go b/pkg/ccl/changefeedccl/schema_registry.go index 70f4ca750c9a..be35c75d7539 100644 --- a/pkg/ccl/changefeedccl/schema_registry.go +++ b/pkg/ccl/changefeedccl/schema_registry.go @@ -14,7 +14,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/url" "path" @@ -141,8 +140,7 @@ func (r *confluentSchemaRegistry) Ping(ctx context.Context) error { // RegisterSchemaForSubject registers the given schema for the given // subject. The schema type is assumed to be AVRO. // -// https://docs.confluent.io/platform/current/schema-registry/develop/api.html#post--subjects-(string-%20subject)-versions -// +// https://docs.confluent.io/platform/current/schema-registry/develop/api.html#post--subjects-(string-%20subject)-versions func (r *confluentSchemaRegistry) RegisterSchemaForSubject( ctx context.Context, subject string, schema string, ) (int32, error) { @@ -214,7 +212,7 @@ func gracefulClose(ctx context.Context, toClose io.ReadCloser) { // // We read upto 4k to try to reach io.EOF. const respExtraReadLimit = 4096 - _, _ = io.CopyN(ioutil.Discard, toClose, respExtraReadLimit) + _, _ = io.CopyN(io.Discard, toClose, respExtraReadLimit) if err := toClose.Close(); err != nil { log.VInfof(ctx, 2, "failure to close schema registry connection", err) } diff --git a/pkg/ccl/changefeedccl/schemafeed/table_event_filter_datadriven_test.go b/pkg/ccl/changefeedccl/schemafeed/table_event_filter_datadriven_test.go index 57e1decc152d..65b12df705b1 100644 --- a/pkg/ccl/changefeedccl/schemafeed/table_event_filter_datadriven_test.go +++ b/pkg/ccl/changefeedccl/schemafeed/table_event_filter_datadriven_test.go @@ -33,30 +33,29 @@ import ( // It provides a mechanism to classify and understand how the schemafeed // will interpret different schema change operations. // -// - "exec" -// Executes the input SQL query. +// - "exec" +// Executes the input SQL query. // -// - "create" f= -// Creates a schemafeed with the targets specified as the input with the -// provided ID. 
+// - "create" f= +// Creates a schemafeed with the targets specified as the input with the +// provided ID. // -// - "pop" f= -// Pop all events from the schemafeed with the given ID. -// The structure of the events looks like as follows: +// - "pop" f= +// Pop all events from the schemafeed with the given ID. +// The structure of the events looks like as follows: // -// t 1->2: Unknown -// t 2->3: Unknown -// t 3->4: Unknown -// t 4->5: Unknown -// t 5->6: Unknown -// t 6->7: PrimaryKeyChange -// t 7->8: Unknown -// t 8->9: Unknown -// -// The first column is the name of the table in question. -// The second is the version transition. The third indicates -// the event classification. +// t 1->2: Unknown +// t 2->3: Unknown +// t 3->4: Unknown +// t 4->5: Unknown +// t 5->6: Unknown +// t 6->7: PrimaryKeyChange +// t 7->8: Unknown +// t 8->9: Unknown // +// The first column is the name of the table in question. +// The second is the version transition. The third indicates +// the event classification. func TestDataDriven(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/ccl/changefeedccl/sink_cloudstorage.go b/pkg/ccl/changefeedccl/sink_cloudstorage.go index 829560fc1036..c87170ee2957 100644 --- a/pkg/ccl/changefeedccl/sink_cloudstorage.go +++ b/pkg/ccl/changefeedccl/sink_cloudstorage.go @@ -189,7 +189,6 @@ func (f *cloudStorageSinkFile) Write(p []byte) (int, error) { // guaranteed to be sorted by timestamp. A duplicate of some records might exist // in a different file or even in the same file. // -// // The resolved timestamp files are named `.RESOLVED`. This is // carefully done so that we can offer the following external guarantee: At any // given time, if the files are iterated in lexicographic filename order, @@ -276,7 +275,6 @@ func (f *cloudStorageSinkFile) Write(p []byte) (int, error) { // satisfies requirements of lemma 1. So we can consider these k jobs conceptually as one // job (call it P). Now, we're back to the case where k = 2 with jobs P and Q. Thus, by // induction we have the required proof. 
-// type cloudStorageSink struct { srcID base.SQLInstanceID sinkID int64 diff --git a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go index 37f1838dab0b..4545d73c3730 100644 --- a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go +++ b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go @@ -14,7 +14,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math" "net/url" "os" @@ -95,7 +94,7 @@ func TestCloudStorageSink(t *testing.T) { var folders []string hasChildDirs := func(path string) bool { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return false } diff --git a/pkg/ccl/changefeedccl/sink_webhook.go b/pkg/ccl/changefeedccl/sink_webhook.go index 68c4c52cc1c5..2ff43b56ce02 100644 --- a/pkg/ccl/changefeedccl/sink_webhook.go +++ b/pkg/ccl/changefeedccl/sink_webhook.go @@ -218,17 +218,18 @@ type retryConfig struct { } // proper JSON schema for webhook sink config: -// { -// "Flush": { -// "Messages": ..., -// "Bytes": ..., -// "Frequency": ..., -// }, -// "Retry": { -// "Max": ..., -// "Backoff": ..., -// } -// } +// +// { +// "Flush": { +// "Messages": ..., +// "Bytes": ..., +// "Frequency": ..., +// }, +// "Retry": { +// "Max": ..., +// "Backoff": ..., +// } +// } type webhookSinkConfig struct { Flush batchConfig `json:",omitempty"` Retry retryConfig `json:",omitempty"` diff --git a/pkg/ccl/importerccl/ccl_test.go b/pkg/ccl/importerccl/ccl_test.go index 4b1ccf2cf9df..af4a1e63aece 100644 --- a/pkg/ccl/importerccl/ccl_test.go +++ b/pkg/ccl/importerccl/ccl_test.go @@ -294,11 +294,11 @@ CREATE TABLE mr_regional_by_row (i INT8 PRIMARY KEY, s typ, b bytea) LOCALITY RE // There are two goals of this testcase: // -// 1) Ensure that we can properly export from REGIONAL BY ROW tables (that the -// hidden row stays hidden, unless explicitly requested). -// 2) That we can import the exported data both into a non-RBR table, as well -// as a table which we can later convert to RBR, while preserving the -// crdb_region column data. +// 1. Ensure that we can properly export from REGIONAL BY ROW tables (that the +// hidden row stays hidden, unless explicitly requested). +// 2. That we can import the exported data both into a non-RBR table, as well +// as a table which we can later convert to RBR, while preserving the +// crdb_region column data. func TestMultiRegionExportImportRoundTrip(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/multiregionccl/datadriven_test.go b/pkg/ccl/multiregionccl/datadriven_test.go index 7c9ee648f2ae..0fe2e441b4ac 100644 --- a/pkg/ccl/multiregionccl/datadriven_test.go +++ b/pkg/ccl/multiregionccl/datadriven_test.go @@ -62,11 +62,12 @@ import ( // Similar to exec-sql, but also traces the input statement and analyzes the // trace. Currently, the trace analysis only works for "simple" queries which // perform a single kv operation. The trace is analyzed for the following: -// - served locally: prints true iff the query was routed to the local -// replica. +// - served locally: prints true iff the query was routed to the local +// replica. // - served via follower read: prints true iff the query was served using a -// follower read. This is omitted completely if the query was not served -// locally. +// follower read. This is omitted completely if the query was not served +// locally. +// // This is because it the replica the query is routed to may or may not be the // leaseholder. 
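For reference, the JSON schema documented in sink_webhook.go above is the shape a user-supplied webhook sink configuration has to match. The snippet below parses such a value into structs mirroring that schema; the struct and field types here (plain ints and duration strings) are simplifications for illustration, not the repository's actual batchConfig/retryConfig definitions.

    package example

    import "encoding/json"

    // Simplified mirror of the documented schema; field types are
    // assumptions for the example only.
    type flushConfig struct {
        Messages  int    `json:",omitempty"`
        Bytes     int    `json:",omitempty"`
        Frequency string `json:",omitempty"`
    }

    type retryConfig struct {
        Max     int    `json:",omitempty"`
        Backoff string `json:",omitempty"`
    }

    type webhookConfig struct {
        Flush flushConfig `json:",omitempty"`
        Retry retryConfig `json:",omitempty"`
    }

    func parseWebhookConfig(raw string) (webhookConfig, error) {
        var cfg webhookConfig
        err := json.Unmarshal([]byte(raw), &cfg)
        return cfg, err
    }

    // Example input:
    //   {"Flush": {"Messages": 100, "Frequency": "1s"},
    //    "Retry": {"Max": 3, "Backoff": "500ms"}}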
// diff --git a/pkg/ccl/multiregionccl/region_test.go b/pkg/ccl/multiregionccl/region_test.go index 8905452d0402..dd1c5b5edbb8 100644 --- a/pkg/ccl/multiregionccl/region_test.go +++ b/pkg/ccl/multiregionccl/region_test.go @@ -30,13 +30,14 @@ import ( // TestConcurrentAddDropRegions tests all combinations of add/drop as if they // were executed by two concurrent sessions. The general sketch of the test is // as follows: -// - First operation is executed and blocks before the enum members are promoted. -// - The second operation starts once the first operation has reached the type -// schema changer. It continues to completion. It may succeed/fail depending -// on the specific test setup. -// - The first operation is resumed and allowed to complete. We expect it to -// succeed. -// - Verify database regions are as expected. +// - First operation is executed and blocks before the enum members are promoted. +// - The second operation starts once the first operation has reached the type +// schema changer. It continues to completion. It may succeed/fail depending +// on the specific test setup. +// - The first operation is resumed and allowed to complete. We expect it to +// succeed. +// - Verify database regions are as expected. +// // Operations act on a multi-region database that contains a REGIONAL BY ROW // table, so as to exercise the repartitioning semantics. func TestConcurrentAddDropRegions(t *testing.T) { diff --git a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go index 2f77eefcee50..6e0afdb62542 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go +++ b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go @@ -125,7 +125,8 @@ const defaultTickInterval = time.Second // (with one sample per tickInterval). // // If we want a factor of 0.5 per second, this should be: -// 0.5^(1 second / tickInterval) +// +// 0.5^(1 second / tickInterval) const movingAvgRUPerSecFactor = 0.5 // We request more tokens when the available RUs go below a threshold. The diff --git a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go index 03892a3c815d..94b5bf1f3f78 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go +++ b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go @@ -14,7 +14,6 @@ import ( gosql "database/sql" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "strconv" @@ -399,10 +398,10 @@ func (ts *testState) notCompleted(t *testing.T, d *datadriven.TestData, args cmd // advance advances the clock by the provided duration and returns the new // current time. // -// advance -// 2s -// ---- -// 00:00:02.000 +// advance +// 2s +// ---- +// 00:00:02.000 // // An optional "wait" argument will cause advance to block until it receives a // tick event, indicating the clock change has been processed. @@ -458,11 +457,10 @@ func (ts *testState) unblockRequest(t *testing.T, d *datadriven.TestData, args c // The following example would wait for there to be two outstanding timers at // 00:00:01.000 and 00:00:02.000. // -// timers -// ---- -// 00:00:01.000 -// 00:00:02.000 -// +// timers +// ---- +// 00:00:01.000 +// 00:00:02.000 func (ts *testState) timers(t *testing.T, d *datadriven.TestData, args cmdArgs) string { // If we are rewriting the test, just sleep a bit before returning the // timers. 
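Many hunks in this patch, including the import change just above and the io.Copy calls in the next hunk, swap the deprecated io/ioutil helpers for their Go 1.16+ replacements: ioutil.Discard becomes io.Discard and ioutil.ReadDir becomes os.ReadDir. A minimal sketch of both patterns follows; the function names and the http.Response draining are illustrative rather than code from this repository (the schema registry's gracefulClose, for instance, reads at most a few KB before closing, which the sketch does not reproduce).

    package example

    import (
        "io"
        "net/http"
        "os"
    )

    // drainAndClose shows the io.Discard pattern used throughout this
    // diff: read off any remaining body bytes (so the connection can be
    // reused) and then close it. Error handling is elided for brevity.
    func drainAndClose(resp *http.Response) {
        _, _ = io.Copy(io.Discard, resp.Body) // was: ioutil.Discard
        _ = resp.Body.Close()
    }

    // hasChildDirs mirrors the os.ReadDir change: os.ReadDir returns
    // lightweight os.DirEntry values instead of ioutil.ReadDir's
    // os.FileInfo slice.
    func hasChildDirs(path string) bool {
        entries, err := os.ReadDir(path) // was: ioutil.ReadDir
        if err != nil {
            return false
        }
        for _, e := range entries {
            if e.IsDir() {
                return true
            }
        }
        return false
    }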
@@ -1127,7 +1125,7 @@ func TestConsumptionExternalStorage(t *testing.T) { testSink := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.Method { case http.MethodPost: - n, err := io.Copy(ioutil.Discard, r.Body) + n, err := io.Copy(io.Discard, r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) return @@ -1308,7 +1306,7 @@ func BenchmarkExternalIOAccounting(b *testing.B) { if err != nil { return err } - _, err = io.Copy(ioutil.Discard, ioctx.ReaderCtxAdapter(ctx, r)) + _, err = io.Copy(io.Discard, ioctx.ReaderCtxAdapter(ctx, r)) if err != nil { _ = r.Close(ctx) return err diff --git a/pkg/ccl/multitenantccl/tenantcostclient/token_bucket.go b/pkg/ccl/multitenantccl/tenantcostclient/token_bucket.go index 658c7766a7a7..37c264a3924b 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/token_bucket.go +++ b/pkg/ccl/multitenantccl/tenantcostclient/token_bucket.go @@ -17,8 +17,8 @@ import ( // tokenBucket implements a token bucket. It is a more specialized form of // quotapool.TokenBucket. The main differences are: -// - it does not currently support a burst limit; -// - it has special debt handling. +// - it does not currently support a burst limit; +// - it has special debt handling. // // -- Debt handling -- // @@ -47,17 +47,17 @@ import ( // much it can vary, imagine that at time t=0 we incur some debt D(0) and // consider the two extremes: // -// A. We start with debt D(0), and we never recalculate the rate (no -// "refinancing"). We apply debt at constant rate D(0) / T and all debt is -// paid at time T. +// A. We start with debt D(0), and we never recalculate the rate (no +// "refinancing"). We apply debt at constant rate D(0) / T and all debt is +// paid at time T. // -// B. We start with debt D(0), and we recalculate the rate ("refinance") -// continuously (or, more intuitively, every nanosecond). The -// instantaneous rate is: -// D'(t) = - D(t) / T -// The debt formula is: -// D(t) = D(0) * e^(-t/T) -// We apply 63% of the debt in time T; 86% in 2T; and 95% in 3T. +// B. We start with debt D(0), and we recalculate the rate ("refinance") +// continuously (or, more intuitively, every nanosecond). The +// instantaneous rate is: +// D'(t) = - D(t) / T +// The debt formula is: +// D(t) = D(0) * e^(-t/T) +// We apply 63% of the debt in time T; 86% in 2T; and 95% in 3T. // // The difference between these two extremes is reasonable - we apply between // 63% and 100% of the debt in time T, depending on the usage pattern. diff --git a/pkg/ccl/multitenantccl/tenantcostserver/server.go b/pkg/ccl/multitenantccl/tenantcostserver/server.go index fdf6c839adf4..f3e4a0b5f125 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/server.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/server.go @@ -30,7 +30,8 @@ type instance struct { } // Note: the "four" in the description comes from -// tenantcostclient.extendedReportingPeriodFactor. +// +// tenantcostclient.extendedReportingPeriodFactor. 
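The token_bucket.go comment above bounds how fast debt is applied between the constant-rate case (all of D(0) by time T) and the continuously refinanced case D(t) = D(0) * e^(-t/T). The snippet below simply evaluates the second case at t = T, 2T, 3T to reproduce the quoted 63%/86%/95% figures; it uses no CockroachDB APIs.

    package example

    import (
        "fmt"
        "math"
    )

    // debtPaidFraction returns 1 - e^(-t/T), the share of the initial
    // debt applied by time t under continuous refinancing, per the
    // formula D(t) = D(0) * e^(-t/T) quoted above.
    func debtPaidFraction(t, T float64) float64 {
        return 1 - math.Exp(-t/T)
    }

    func printDebtSchedule() {
        const T = 1.0
        for _, m := range []float64{1, 2, 3} {
            fmt.Printf("t=%.0fT: %2.0f%% of debt applied\n",
                m, 100*debtPaidFraction(m*T, T))
        }
        // Prints approximately 63%, 86% and 95%, matching the comment.
    }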
var instanceInactivity = settings.RegisterDurationSetting( settings.TenantWritable, "tenant_usage_instance_inactivity", diff --git a/pkg/ccl/multitenantccl/tenantcostserver/system_table.go b/pkg/ccl/multitenantccl/tenantcostserver/system_table.go index f519b0bc418f..1abb7caa3981 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/system_table.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/system_table.go @@ -406,7 +406,9 @@ func (h *sysTableHelper) maybeCleanupStaleInstance( // maybeCleanupStaleInstances removes up to maxInstancesCleanup stale instances // (where the last update time is before the cutoff) with IDs in the range -// [startID, endID). +// +// [startID, endID). +// // If endID is -1, then the range is unrestricted [startID, ∞). // // Returns the ID of the instance following the deleted instances. This is diff --git a/pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go b/pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go index b90363aa3340..d2b7dc401f14 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go @@ -131,28 +131,27 @@ func (s *State) Request( // // Arguments: // -// - availableRU is the amount of Request Units that the tenant can consume at -// will. Also known as "burst RUs". +// - availableRU is the amount of Request Units that the tenant can consume at +// will. Also known as "burst RUs". // -// - refillRate is the amount of Request Units per second that the tenant -// receives. +// - refillRate is the amount of Request Units per second that the tenant +// receives. // -// - maxBurstRU is the maximum amount of Request Units that can be accumulated -// from the refill rate, or 0 if there is no limit. +// - maxBurstRU is the maximum amount of Request Units that can be accumulated +// from the refill rate, or 0 if there is no limit. // -// - asOf is a timestamp; the reconfiguration request is assumed to be based on -// the consumption at that time. This timestamp is used to compensate for any -// refill that would have happened in the meantime. +// - asOf is a timestamp; the reconfiguration request is assumed to be based on +// the consumption at that time. This timestamp is used to compensate for any +// refill that would have happened in the meantime. // -// - asOfConsumedRequestUnits is the total number of consumed RUs based on -// which the reconfiguration values were calculated (i.e. at the asOf time). -// It is used to adjust availableRU with the consumption that happened in the -// meantime. +// - asOfConsumedRequestUnits is the total number of consumed RUs based on +// which the reconfiguration values were calculated (i.e. at the asOf time). +// It is used to adjust availableRU with the consumption that happened in the +// meantime. // -// - now is the current time. -// -// - currentConsumedRequestUnits is the current total number of consumed RUs. +// - now is the current time. // +// - currentConsumedRequestUnits is the current total number of consumed RUs. 
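The argument list above describes two compensations Reconfigure has to make because the request was computed at asOf: add the refill that would have accrued since then, and subtract whatever was consumed in the meantime. The helper below is only a sketch of that arithmetic as described; it is not the real tenanttokenbucket implementation, the parameter names are borrowed from the comment, and the clamping to maxBurstRU is my own guess.

    package example

    import "time"

    // adjustAvailableRU is an illustrative reading of the compensation
    // described above: refill earned between asOf and now is added, and
    // RUs consumed in that window are subtracted. Clamping behavior is
    // an assumption, not the real logic.
    func adjustAvailableRU(
        availableRU, refillRate, maxBurstRU float64,
        asOf, now time.Time,
        asOfConsumedRU, currentConsumedRU float64,
    ) float64 {
        elapsed := now.Sub(asOf).Seconds()
        adjusted := availableRU + refillRate*elapsed - (currentConsumedRU - asOfConsumedRU)
        if maxBurstRU > 0 && adjusted > maxBurstRU {
            adjusted = maxBurstRU
        }
        return adjusted
    }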
func (s *State) Reconfigure( ctx context.Context, tenantID roachpb.TenantID, diff --git a/pkg/ccl/oidcccl/authentication_oidc.go b/pkg/ccl/oidcccl/authentication_oidc.go index b4e00e161508..0e889010fcbb 100644 --- a/pkg/ccl/oidcccl/authentication_oidc.go +++ b/pkg/ccl/oidcccl/authentication_oidc.go @@ -69,48 +69,48 @@ var ( // A successful configuration and login flow looks like the following (logout logic is unchanged // with OIDC): // -// 0. The cluster operator configures the cluster to use OIDC. Once the cluster setting -// `server.oidc_authentication.enabled` is set to true, the OIDC client will make a request to -// retrieve the discovery document using the `server.oidc_authentication.provider_url` setting. -// This attempt will be retried automatically with every call to the `login` or `callback` or -// any change to any OIDC settings as long as `enabled` is still true. That behavior is meant to -// support easy recovery from any downtime or HTTP errors on the provider side. +// 0. The cluster operator configures the cluster to use OIDC. Once the cluster setting +// `server.oidc_authentication.enabled` is set to true, the OIDC client will make a request to +// retrieve the discovery document using the `server.oidc_authentication.provider_url` setting. +// This attempt will be retried automatically with every call to the `login` or `callback` or +// any change to any OIDC settings as long as `enabled` is still true. That behavior is meant to +// support easy recovery from any downtime or HTTP errors on the provider side. // -// 1. A CRDB user opens the Admin UI and clicks on the `Login with OIDC` button (text is -// configurable using the `server.oidc_authentication.button_text` setting. +// 1. A CRDB user opens the Admin UI and clicks on the `Login with OIDC` button (text is +// configurable using the `server.oidc_authentication.button_text` setting. // -// 2. The browser loads `/oidc/v1/login` from the cluster, which triggers a redirect to the auth -// provider. A number of parameters are sent along with this request: (these are all defined in -// the OIDC spec available at: https://openid.net/specs/openid-connect-core-1_0.html) -// - client_id and client_secret: these are set using their correspondingly named cluster -// settings and are values that the auth provider will create. -// - redirect_uri: set using the `server.oidc_authentication.redirect_url` cluster setting. This -// will point to `/oidc/v1/callback` at the appropriate host or load balancer -// that the cluster is deployed to. -// - scopes: set using the `server.oidc_authentication.scopes` cluster setting. This defines what -// information about the user we expect to receive in the callback. -// - state: this is a base64 encoded protobuf value that contains the NodeID of the node that -// originated the login request and the state variable that was recorded as being in the -// caller's cookie at the time. This value wil be returned back as a parameter to the -// callback URL by the authentication provider. We check to make sure it matches the -// cookie and our stored state to ensure we're processing a response to the request -// that we triggered. +// 2. The browser loads `/oidc/v1/login` from the cluster, which triggers a redirect to the auth +// provider. 
A number of parameters are sent along with this request: (these are all defined in +// the OIDC spec available at: https://openid.net/specs/openid-connect-core-1_0.html) +// - client_id and client_secret: these are set using their correspondingly named cluster +// settings and are values that the auth provider will create. +// - redirect_uri: set using the `server.oidc_authentication.redirect_url` cluster setting. This +// will point to `/oidc/v1/callback` at the appropriate host or load balancer +// that the cluster is deployed to. +// - scopes: set using the `server.oidc_authentication.scopes` cluster setting. This defines what +// information about the user we expect to receive in the callback. +// - state: this is a base64 encoded protobuf value that contains the NodeID of the node that +// originated the login request and the state variable that was recorded as being in the +// caller's cookie at the time. This value wil be returned back as a parameter to the +// callback URL by the authentication provider. We check to make sure it matches the +// cookie and our stored state to ensure we're processing a response to the request +// that we triggered. // // 3. The user authenticates at the auth provider // -// 4. The auth provider redirects to the `redirect_uri` we provided, which is handled at -// `/oidc/v1/callback`. We validate that the `state` parameter matches the user's browser cookie, -// then we exchange the `authentication_code` that was provided for an OAuth token from the -// auth provider via an HTTP request. This handled by the `go-oidc` library. Once we have the -// id token, we validate and decode it, extract a field from the JSON set via the -// `server.oidc_authentication.claim_json_key`. The key is then passed through a regular -// expression to transform its value to a DB principal (this is to support the typical workflow -// of stripping a realm or domain name from an email address principal). The regular expression -// is set using the `server.oidc_authentication.principal_regex` cluster setting. +// 4. The auth provider redirects to the `redirect_uri` we provided, which is handled at +// `/oidc/v1/callback`. We validate that the `state` parameter matches the user's browser cookie, +// then we exchange the `authentication_code` that was provided for an OAuth token from the +// auth provider via an HTTP request. This handled by the `go-oidc` library. Once we have the +// id token, we validate and decode it, extract a field from the JSON set via the +// `server.oidc_authentication.claim_json_key`. The key is then passed through a regular +// expression to transform its value to a DB principal (this is to support the typical workflow +// of stripping a realm or domain name from an email address principal). The regular expression +// is set using the `server.oidc_authentication.principal_regex` cluster setting. // -// If the username we compute exists in the DB, we create a web session for them in the usual -// manner, bypassing any password validation requirements, and redirect them to `/` so they can -// enjoy a logged-in experience in the Admin UI. +// If the username we compute exists in the DB, we create a web session for them in the usual +// manner, bypassing any password validation requirements, and redirect them to `/` so they can +// enjoy a logged-in experience in the Admin UI. 
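Step 4 above passes the extracted claim value through server.oidc_authentication.principal_regex, with the first capture group becoming the database principal (typically stripping the domain from an email address). A minimal sketch of that transformation follows; the concrete pattern and domain are invented example values, not a recommended setting.

    package example

    import "regexp"

    // principalFromClaim mimics the documented use of
    // server.oidc_authentication.principal_regex: the first capture
    // group of the expression becomes the database principal. The
    // pattern below (strip everything after '@') is an example only.
    var principalRE = regexp.MustCompile(`^([^@]+)@example\.com$`)

    func principalFromClaim(claim string) (string, bool) {
        m := principalRE.FindStringSubmatch(claim)
        if m == nil {
            return "", false
        }
        return m[1], true
    }

    // principalFromClaim("maxroach@example.com") returns ("maxroach", true).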
type oidcAuthenticationServer struct { mutex syncutil.RWMutex conf oidcAuthenticationConf diff --git a/pkg/ccl/oidcccl/settings.go b/pkg/ccl/oidcccl/settings.go index fbad1cf36bfc..a6066be25b63 100644 --- a/pkg/ccl/oidcccl/settings.go +++ b/pkg/ccl/oidcccl/settings.go @@ -159,13 +159,13 @@ func validateOIDCRedirectURL(values *settings.Values, s string) error { // callback URL that matches its own `region` locality tag. // // Example valid values: -// - 'https://cluster.example.com:8080/oidc/v1/callback' -// - '{ -// "redirect_urls": { -// "us-east-1": "https://localhost:8080/oidc/v1/callback", -// "eu-west-1": "example.com" -// } -// }' +// - 'https://cluster.example.com:8080/oidc/v1/callback' +// - '{ +// "redirect_urls": { +// "us-east-1": "https://localhost:8080/oidc/v1/callback", +// "eu-west-1": "example.com" +// } +// }' // // In a multi-region cluster where this setting is set to a URL string, we will // use the same callback URL on all auth requests. In a multi-region setting diff --git a/pkg/ccl/spanconfigccl/spanconfiglimiterccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfiglimiterccl/datadriven_test.go index 44d2922665ca..ec308cbe1471 100644 --- a/pkg/ccl/spanconfigccl/spanconfiglimiterccl/datadriven_test.go +++ b/pkg/ccl/spanconfigccl/spanconfiglimiterccl/datadriven_test.go @@ -33,23 +33,22 @@ import ( // TestDataDriven is a data-driven test for spanconfig.Limiter. It offers the // following commands: // -// - "initialize" tenant= -// Initialize a secondary tenant with the given ID. +// - "initialize" tenant= +// Initialize a secondary tenant with the given ID. // -// - "exec-sql" [tenant=] -// Executes the input SQL query for the given tenant. All statements are -// executed in a single transaction. +// - "exec-sql" [tenant=] +// Executes the input SQL query for the given tenant. All statements are +// executed in a single transaction. // -// - "query-sql" [tenant=] [retry] -// Executes the input SQL query for the given tenant and print the results. -// If retry is specified and the expected results do not match the actual -// results, the query will be retried under a testutils.SucceedsSoon block. -// If run with -rewrite, we insert a 500ms sleep before executing the query -// once. -// -// - override limit= -// Override the span limit each tenant is configured with. +// - "query-sql" [tenant=] [retry] +// Executes the input SQL query for the given tenant and print the results. +// If retry is specified and the expected results do not match the actual +// results, the query will be retried under a testutils.SucceedsSoon block. +// If run with -rewrite, we insert a 500ms sleep before executing the query +// once. // +// - override limit= +// Override the span limit each tenant is configured with. func TestDataDriven(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go index 042bf68705af..74ae90df7430 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go @@ -40,36 +40,36 @@ import ( // we'd expect. Only fields that differ from the static RANGE DEFAULT are // printed in the test output for readability. The following syntax is provided: // -// - "initialize" tenant= -// Initialize a secondary tenant with the given ID. +// - "initialize" tenant= +// Initialize a secondary tenant with the given ID. 
// -// - "exec-sql" [tenant=] -// Executes the input SQL query for the given tenant. All statements are -// executed in a single transaction. +// - "exec-sql" [tenant=] +// Executes the input SQL query for the given tenant. All statements are +// executed in a single transaction. // -// - "query-sql" [tenant=] -// Executes the input SQL query for the given tenant and print the results. +// - "query-sql" [tenant=] +// Executes the input SQL query for the given tenant and print the results. // -// - "reconcile" [tenant=] -// Start the reconciliation process for the given tenant. +// - "reconcile" [tenant=] +// Start the reconciliation process for the given tenant. // -// - "mutations" [tenant=] [discard] -// Print the latest set of mutations issued by the reconciler for the given -// tenant. If 'discard' is specified, nothing is printed. +// - "mutations" [tenant=] [discard] +// Print the latest set of mutations issued by the reconciler for the given +// tenant. If 'discard' is specified, nothing is printed. // -// - "state" [offset=] [limit=] [limit=] [ts=] -// cluster OR -// tenants id1,id2... OR -// descs id1,id2... -// Creates and writes a protected timestamp record with id and ts with an -// appropriate ptpb.Target. +// - "protect" [record-id=] [ts=] +// cluster OR +// tenants id1,id2... OR +// descs id1,id2... +// Creates and writes a protected timestamp record with id and ts with an +// appropriate ptpb.Target. // -// - "release" [record-id=] -// Releases the protected timestamp record with id. +// - "release" [record-id=] +// Releases the protected timestamp record with id. // // TODO(irfansharif): Provide a way to stop reconcilers and/or start them back // up again. It would let us add simulate for suspended tenants, and behavior of diff --git a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/datadriven_test.go index 22e339417998..226b7b3d65ef 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsplitterccl/datadriven_test.go +++ b/pkg/ccl/spanconfigccl/spanconfigsplitterccl/datadriven_test.go @@ -34,17 +34,16 @@ import ( // TestDataDriven is a data-driven test for spanconfig.Splitter. It offers // the following commands: // -// - "exec-sql" -// Executes the input SQL query. +// - "exec-sql" +// Executes the input SQL query. // -// - "query-sql" -// Executes the input SQL query and prints the results. -// -// - "splits" [database= table=] [id=] -// Prints the number splits generated the referenced object (named database + -// table, or descriptor id). Also logs the set of internal steps the Splitter -// takes to arrive at the number. +// - "query-sql" +// Executes the input SQL query and prints the results. // +// - "splits" [database= table=] [id=] +// Prints the number splits generated the referenced object (named database + +// table, or descriptor id). Also logs the set of internal steps the Splitter +// takes to arrive at the number. func TestDataDriven(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go index 204e865474a5..7b102b8469d9 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go @@ -47,36 +47,36 @@ import ( // (default) RANGE DEFAULT are printed in the test output for readability. 
It // offers the following commands: // -// - "exec-sql" -// Executes the input SQL query. +// - "exec-sql" +// Executes the input SQL query. // -// - "query-sql" -// Executes the input SQL query and prints the results. +// - "query-sql" +// Executes the input SQL query and prints the results. // -// - "translate" [database=] [table=] [named-zone=] [id=] -// Translates the SQL zone config state to the span config state starting -// from the referenced object (named zone, database, database + table, or -// descriptor id) as the root. +// - "translate" [database=] [table=] [named-zone=] [id=] +// Translates the SQL zone config state to the span config state starting +// from the referenced object (named zone, database, database + table, or +// descriptor id) as the root. // -// - "full-translate" -// Performs a full translation of the SQL zone config state to the implied -// span config state. +// - "full-translate" +// Performs a full translation of the SQL zone config state to the implied +// span config state. // -// - "mark-table-offline" [database=] [table=] -// Marks the given table as offline for testing purposes. +// - "mark-table-offline" [database=] [table=] +// Marks the given table as offline for testing purposes. // -// - "mark-table-public" [database=] [table=] -// Marks the given table as public. +// - "mark-table-public" [database=] [table=] +// Marks the given table as public. // -// - "protect" [record-id=] [ts=] -// cluster OR -// tenants id1,id2... OR -// descs id1,id2... -// Creates and writes a protected timestamp record with id and ts with an -// appropriate ptpb.Target. +// - "protect" [record-id=] [ts=] +// cluster OR +// tenants id1,id2... OR +// descs id1,id2... +// Creates and writes a protected timestamp record with id and ts with an +// appropriate ptpb.Target. // -// - "release" [record-id=] -// Releases the protected timestamp record with id. +// - "release" [record-id=] +// Releases the protected timestamp record with id. func TestDataDriven(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/sqlproxyccl/denylist/watcher.go b/pkg/ccl/sqlproxyccl/denylist/watcher.go index 2acb474ed171..94418e420ff8 100644 --- a/pkg/ccl/sqlproxyccl/denylist/watcher.go +++ b/pkg/ccl/sqlproxyccl/denylist/watcher.go @@ -104,9 +104,11 @@ func newWatcher(list *Denylist, next chan *Denylist) *Watcher { // is returned immediately. // // Example Usage: -// remove, err := w.ListenForDenied(connection, func(err error) { -// /* connection was added to the deny list */ -// }) +// +// remove, err := w.ListenForDenied(connection, func(err error) { +// /* connection was added to the deny list */ +// }) +// // if err != nil { /*connection already on the denylist*/ } // defer remove() // diff --git a/pkg/ccl/sqlproxyccl/error.go b/pkg/ccl/sqlproxyccl/error.go index 13b31bf7724f..3418f53607f9 100644 --- a/pkg/ccl/sqlproxyccl/error.go +++ b/pkg/ccl/sqlproxyccl/error.go @@ -15,6 +15,7 @@ import ( ) // errorCode classifies errors emitted by Proxy(). 
+// //go:generate stringer -type=errorCode type errorCode int diff --git a/pkg/ccl/sqlproxyccl/proxy_handler.go b/pkg/ccl/sqlproxyccl/proxy_handler.go index 751dd292c6d4..3a21fba291dd 100644 --- a/pkg/ccl/sqlproxyccl/proxy_handler.go +++ b/pkg/ccl/sqlproxyccl/proxy_handler.go @@ -615,15 +615,15 @@ func (handler *proxyHandler) setupIncomingCert(ctx context.Context) error { // // We currently support embedding the cluster identifier in three ways: // -// - Through server name identification (SNI) when using TLS connections -// (e.g. happy-koala-3.5xj.gcp-us-central1.cockroachlabs.cloud) +// - Through server name identification (SNI) when using TLS connections +// (e.g. happy-koala-3.5xj.gcp-us-central1.cockroachlabs.cloud) // // - Within the database param (e.g. "happy-koala-3.defaultdb") // -// - Within the options param (e.g. "... --cluster=happy-koala-5 ..."). -// PostgreSQL supports three different ways to set a run-time parameter -// through its command-line options, i.e. "-c NAME=VALUE", "-cNAME=VALUE", and -// "--NAME=VALUE". +// - Within the options param (e.g. "... --cluster=happy-koala-5 ..."). +// PostgreSQL supports three different ways to set a run-time parameter +// through its command-line options, i.e. "-c NAME=VALUE", "-cNAME=VALUE", and +// "--NAME=VALUE". func clusterNameAndTenantFromParams( ctx context.Context, fe *FrontendAdmitInfo, ) (*pgproto3.StartupMessage, string, roachpb.TenantID, error) { @@ -790,9 +790,10 @@ func parseDatabaseParam(databaseParam string) (clusterIdentifier, databaseName s // options parameter with the cluster key stripped out. Just like PostgreSQL, // the sqlproxy supports three different ways to set a run-time parameter // through its command-line options: -// -c NAME=VALUE (commonly used throughout documentation around PGOPTIONS) -// -cNAME=VALUE -// --NAME=VALUE +// +// -c NAME=VALUE (commonly used throughout documentation around PGOPTIONS) +// -cNAME=VALUE +// --NAME=VALUE // // Note that this parsing approach is not perfect as it allows a negative case // like options="-c --cluster=happy-koala -c -c -c" to go through. To properly diff --git a/pkg/ccl/storageccl/encryption_test.go b/pkg/ccl/storageccl/encryption_test.go index d3505f6c9007..e8e5d233b432 100644 --- a/pkg/ccl/storageccl/encryption_test.go +++ b/pkg/ccl/storageccl/encryption_test.go @@ -13,7 +13,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "testing" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" @@ -271,7 +270,7 @@ func BenchmarkEncryption(b *testing.B) { if err != nil { b.Fatal(err) } - _, err = io.Copy(ioutil.Discard, r.(io.Reader)) + _, err = io.Copy(io.Discard, r.(io.Reader)) if err != nil { b.Fatal(err) } diff --git a/pkg/ccl/storageccl/external_sst_reader_test.go b/pkg/ccl/storageccl/external_sst_reader_test.go index 80210375f1cd..23fd41c74246 100644 --- a/pkg/ccl/storageccl/external_sst_reader_test.go +++ b/pkg/ccl/storageccl/external_sst_reader_test.go @@ -96,7 +96,6 @@ func TestSSTReaderCache(t *testing.T) { // t2 a50--------------a1000 // // t1 a0----a100 -// func TestNewExternalSSTReader(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/streamingccl/streamclient/client.go b/pkg/ccl/streamingccl/streamclient/client.go index e85a76bd1e51..4953065d1843 100644 --- a/pkg/ccl/streamingccl/streamclient/client.go +++ b/pkg/ccl/streamingccl/streamclient/client.go @@ -42,7 +42,8 @@ type CheckpointToken []byte // Client provides a way for the stream ingestion job to consume a // specified stream. 
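The proxy_handler.go comment above lists the three option spellings accepted for run-time parameters (-c NAME=VALUE, -cNAME=VALUE, --NAME=VALUE) when the cluster name is embedded in the options param. The sketch below pulls a cluster name out of an options string in any of those spellings; it is illustrative only and deliberately omits what the real parser also does (stripping the flag back out of the returned options and handling the malformed cases the comment mentions).

    package example

    import "strings"

    // clusterFromOptions scans a libpq "options" value for a cluster
    // name given in any of the three spellings documented above. Rough
    // illustration only; not the sqlproxy's actual parser.
    func clusterFromOptions(options string) (string, bool) {
        fields := strings.Fields(options)
        for i := 0; i < len(fields); i++ {
            f := fields[i]
            switch {
            case f == "-c" && i+1 < len(fields): // -c NAME=VALUE
                f = fields[i+1]
                i++
            case strings.HasPrefix(f, "-c"): // -cNAME=VALUE
                f = strings.TrimPrefix(f, "-c")
            case strings.HasPrefix(f, "--"): // --NAME=VALUE
                f = strings.TrimPrefix(f, "--")
            default:
                continue
            }
            if strings.HasPrefix(f, "cluster=") {
                return strings.TrimPrefix(f, "cluster="), true
            }
        }
        return "", false
    }

    // clusterFromOptions("-c cluster=happy-koala-5") returns ("happy-koala-5", true).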
// TODO(57427): The stream client does not yet support the concept of -// generations in a stream. +// +// generations in a stream. type Client interface { // Create initializes a stream with the source, potentially reserving any // required resources, such as protected timestamps, and returns an ID which diff --git a/pkg/cli/clienturl/doc.go b/pkg/cli/clienturl/doc.go index 1496cc118b5f..0231ca211f70 100644 --- a/pkg/cli/clienturl/doc.go +++ b/pkg/cli/clienturl/doc.go @@ -10,8 +10,8 @@ // Package clienturl provides glue between: // -// - security/clientconnurl, which is able to translate configuration -// parameters for SQL clients and compute security parameters, +// - security/clientconnurl, which is able to translate configuration +// parameters for SQL clients and compute security parameters, // // - cli/clisqlcfg, which serves as input for a SQL shell. // diff --git a/pkg/cli/clierror/syntax_error_test.go b/pkg/cli/clierror/syntax_error_test.go index 3d321c95edb0..a2c6b1ca26ac 100644 --- a/pkg/cli/clierror/syntax_error_test.go +++ b/pkg/cli/clierror/syntax_error_test.go @@ -12,7 +12,7 @@ package clierror_test import ( "context" - "io/ioutil" + "io" "net/url" "testing" @@ -38,7 +38,7 @@ func TestIsSQLSyntaxError(t *testing.T) { defer cleanup() var sqlConnCtx clisqlclient.Context - conn := sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, url.String()) + conn := sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, url.String()) defer func() { if err := conn.Close(); err != nil { t.Fatal(err) diff --git a/pkg/cli/clierrorplus/decorate_error.go b/pkg/cli/clierrorplus/decorate_error.go index 00e7d44f7759..8b2f380b56db 100644 --- a/pkg/cli/clierrorplus/decorate_error.go +++ b/pkg/cli/clierrorplus/decorate_error.go @@ -36,9 +36,13 @@ import ( // to the details of a GRPC connection failure. // // On *nix, a connect error looks like: -// dial tcp : : connection refused +// +// dial tcp : : connection refused +// // On Windows, it looks like: -// dial tcp : : No connection could be made because the target machine actively refused it. +// +// dial tcp : : No connection could be made because the target machine actively refused it. +// // So we look for the common bit. var reGRPCConnRefused = regexp.MustCompile(`Error while dialing dial tcp .*: connection.* refused`) diff --git a/pkg/cli/clisqlcfg/doc.go b/pkg/cli/clisqlcfg/doc.go index 3034517f5c40..984ed0ec4202 100644 --- a/pkg/cli/clisqlcfg/doc.go +++ b/pkg/cli/clisqlcfg/doc.go @@ -17,13 +17,12 @@ // // 2. load customizations from e.g. command-line flags, env vars, etc. // -// 3. validate the configuration and open the input/output streams via -// `(*Context).Open()`. Defer a call to the returned cleanup function. +// 3. validate the configuration and open the input/output streams via +// `(*Context).Open()`. Defer a call to the returned cleanup function. // -// 4. open a client connection via `(*Context).MakeConn()`. -// Note: this must occur after the call to `Open()`, as the configuration -// may not be ready before that point. +// 4. open a client connection via `(*Context).MakeConn()`. +// Note: this must occur after the call to `Open()`, as the configuration +// may not be ready before that point. // // 5. call `(*Context).Run()`. 
-// package clisqlcfg diff --git a/pkg/cli/clisqlclient/conn_test.go b/pkg/cli/clisqlclient/conn_test.go index 73bf2e9a9dc8..5419b3bb4da7 100644 --- a/pkg/cli/clisqlclient/conn_test.go +++ b/pkg/cli/clisqlclient/conn_test.go @@ -12,7 +12,7 @@ package clisqlclient_test import ( "context" - "io/ioutil" + "io" "net/url" "testing" @@ -27,7 +27,7 @@ import ( func makeSQLConn(url string) clisqlclient.Conn { var sqlConnCtx clisqlclient.Context - return sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, url) + return sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, url) } func TestConnRecover(t *testing.T) { diff --git a/pkg/cli/clisqlexec/format_html_test.go b/pkg/cli/clisqlexec/format_html_test.go index d76709ea471c..1ef690f2831e 100644 --- a/pkg/cli/clisqlexec/format_html_test.go +++ b/pkg/cli/clisqlexec/format_html_test.go @@ -13,7 +13,7 @@ package clisqlexec import ( "bytes" "fmt" - "io/ioutil" + "io" "testing" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -87,7 +87,7 @@ func TestRenderHTML(t *testing.T) { name := fmt.Sprintf("escape=%v/rowStats=%v", tc.reporter.escape, tc.reporter.rowStats) t.Run(name, func(t *testing.T) { var buf bytes.Buffer - err := render(&tc.reporter, &buf, ioutil.Discard, + err := render(&tc.reporter, &buf, io.Discard, cols, NewRowSliceIter(rows, align), nil /* completedHook */, nil /* noRowsHook */) if err != nil { diff --git a/pkg/cli/clisqlexec/format_table.go b/pkg/cli/clisqlexec/format_table.go index 75bb88207ff8..a311dbfac117 100644 --- a/pkg/cli/clisqlexec/format_table.go +++ b/pkg/cli/clisqlexec/format_table.go @@ -149,14 +149,14 @@ func newRowIter(rows clisqlclient.Rows, showMoreChars bool) *rowIter { } // rowReporter is used to render result sets. -// - describe is called once in any case with the result column set. -// - beforeFirstRow is called once upon the first row encountered. -// - iter is called for every row, including the first (called after beforeFirstRowFn). -// - doneRows is called once after the last row encountered (in case of no error). -// This can also be called when there were no rows, if the rowsAffectedHook -// passed to render() returns false. -// - doneNoRows is called once when there were no rows and the rowsAffectedHook -// returns true. +// - describe is called once in any case with the result column set. +// - beforeFirstRow is called once upon the first row encountered. +// - iter is called for every row, including the first (called after beforeFirstRowFn). +// - doneRows is called once after the last row encountered (in case of no error). +// This can also be called when there were no rows, if the rowsAffectedHook +// passed to render() returns false. +// - doneNoRows is called once when there were no rows and the rowsAffectedHook +// returns true. 
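The list above pins down the order in which render() drives a rowReporter. To make that control flow concrete, here is a driver over a deliberately simplified, hypothetical interface; the method signatures are invented and only the calling sequence is taken from the comment.

    package example

    import "io"

    // reporter is a simplified stand-in for clisqlexec's rowReporter;
    // only the call order below is taken from the documentation.
    type reporter interface {
        describe(w io.Writer, cols []string) error
        beforeFirstRow(w io.Writer) error
        iter(w io.Writer, row []string) error
        doneRows(w io.Writer, n int) error
        doneNoRows(w io.Writer) error
    }

    // drive shows the documented sequence: describe always runs; on the
    // first row beforeFirstRow runs, then iter for every row (including
    // the first); afterwards either doneNoRows (no rows and the
    // rowsAffectedHook returned true) or doneRows (rows were seen, or
    // the hook returned false).
    func drive(
        r reporter, w io.Writer, cols []string, rows [][]string, rowsAffectedHook func() bool,
    ) error {
        if err := r.describe(w, cols); err != nil {
            return err
        }
        if len(rows) == 0 && rowsAffectedHook() {
            return r.doneNoRows(w)
        }
        for i, row := range rows {
            if i == 0 {
                if err := r.beforeFirstRow(w); err != nil {
                    return err
                }
            }
            if err := r.iter(w, row); err != nil {
                return err
            }
        }
        return r.doneRows(w, len(rows))
    }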
type rowReporter interface { describe(w io.Writer, cols []string) error beforeFirstRow(w io.Writer, allRows RowStrIter) error diff --git a/pkg/cli/clisqlexec/run_query_test.go b/pkg/cli/clisqlexec/run_query_test.go index ceff3ee41f54..1343310ab951 100644 --- a/pkg/cli/clisqlexec/run_query_test.go +++ b/pkg/cli/clisqlexec/run_query_test.go @@ -14,7 +14,6 @@ import ( "bytes" "context" "io" - "io/ioutil" "net/url" "reflect" "testing" @@ -33,7 +32,7 @@ var testExecCtx = clisqlexec.Context{ func makeSQLConn(url string) clisqlclient.Conn { var sqlConnCtx clisqlclient.Context - return sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, url) + return sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, url) } func runQueryAndFormatResults( @@ -41,7 +40,7 @@ func runQueryAndFormatResults( ) (err error) { return testExecCtx.RunQueryAndFormatResults( context.Background(), - conn, w, ioutil.Discard, ioutil.Discard, fn) + conn, w, io.Discard, io.Discard, fn) } func TestRunQuery(t *testing.T) { diff --git a/pkg/cli/clisqlshell/sql_test.go b/pkg/cli/clisqlshell/sql_test.go index 6eb8d3c5a9e1..0b2d046085bc 100644 --- a/pkg/cli/clisqlshell/sql_test.go +++ b/pkg/cli/clisqlshell/sql_test.go @@ -12,7 +12,7 @@ package clisqlshell_test import ( "fmt" - "io/ioutil" + "io" "os" "strings" "testing" @@ -371,7 +371,7 @@ func Example_sql_lex() { defer c.Cleanup() var sqlConnCtx clisqlclient.Context - conn := sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, + conn := sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, fmt.Sprintf("postgres://%s@%s/?sslmode=disable", username.RootUser, c.ServingSQLAddr())) defer func() { diff --git a/pkg/cli/debug_recover_loss_of_quorum_test.go b/pkg/cli/debug_recover_loss_of_quorum_test.go index 0dc37b62fc4a..ae2981c72c67 100644 --- a/pkg/cli/debug_recover_loss_of_quorum_test.go +++ b/pkg/cli/debug_recover_loss_of_quorum_test.go @@ -38,9 +38,10 @@ import ( // This is done by running three node cluster with disk backed storage, // stopping it and verifying content of collected replica info file. // This check verifies that: -// we successfully iterate requested stores, -// data is written in expected location, -// data contains info only about stores requested. +// +// we successfully iterate requested stores, +// data is written in expected location, +// data contains info only about stores requested. 
func TestCollectInfoFromMultipleStores(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/cli/democluster/demo_cluster_test.go b/pkg/cli/democluster/demo_cluster_test.go index 6a1ddc38973e..f1115f00a836 100644 --- a/pkg/cli/democluster/demo_cluster_test.go +++ b/pkg/cli/democluster/demo_cluster_test.go @@ -13,7 +13,7 @@ package democluster import ( "context" "fmt" - "io/ioutil" + "io" "testing" "time" @@ -218,7 +218,7 @@ func TestTransientClusterSimulateLatencies(t *testing.T) { true /* includeAppName */, false /* isTenant */) require.NoError(t, err) sqlConnCtx := clisqlclient.Context{} - conn := sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, url.ToPQ().String()) + conn := sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, url.ToPQ().String()) defer func() { if err := conn.Close(); err != nil { t.Fatal(err) @@ -312,7 +312,7 @@ func TestTransientClusterMultitenant(t *testing.T) { true /* includeAppName */, true /* isTenant */) require.NoError(t, err) sqlConnCtx := clisqlclient.Context{} - conn := sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, url.ToPQ().String()) + conn := sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, url.ToPQ().String()) defer func() { if err := conn.Close(); err != nil { t.Fatal(err) diff --git a/pkg/cli/exit/doc.go b/pkg/cli/exit/doc.go index 14ebd56500fb..c76a1128796e 100644 --- a/pkg/cli/exit/doc.go +++ b/pkg/cli/exit/doc.go @@ -39,11 +39,10 @@ // // This package accommodates this as follows: // -// - exit codes common to all commands should be allocated -// incrementally starting from the last defined common error -// in codes.go. -// -// - exit codes specific to one command should be allocated downwards -// starting from 125. +// - exit codes common to all commands should be allocated +// incrementally starting from the last defined common error +// in codes.go. // +// - exit codes specific to one command should be allocated downwards +// starting from 125. package exit diff --git a/pkg/cli/flags.go b/pkg/cli/flags.go index 52b6b46500df..ba5aeb5b6df7 100644 --- a/pkg/cli/flags.go +++ b/pkg/cli/flags.go @@ -47,11 +47,11 @@ import ( // structs. // // Corollaries: -// - it would be a programming error to access these variables directly -// outside of this file (flags.go) -// - the underlying context parameters must receive defaults in -// initCLIDefaults() even when they are otherwise overridden by the -// flags logic, because some tests to not use the flag logic at all. +// - it would be a programming error to access these variables directly +// outside of this file (flags.go) +// - the underlying context parameters must receive defaults in +// initCLIDefaults() even when they are otherwise overridden by the +// flags logic, because some tests to not use the flag logic at all. var serverListenPort, serverSocketDir string var serverAdvertiseAddr, serverAdvertisePort string var serverSQLAddr, serverSQLPort string diff --git a/pkg/cli/haproxy.go b/pkg/cli/haproxy.go index 9088e1f61ddc..44d79eeeb4e7 100644 --- a/pkg/cli/haproxy.go +++ b/pkg/cli/haproxy.go @@ -15,7 +15,6 @@ import ( "fmt" "html/template" "io" - "io/ioutil" "os" "regexp" "sort" @@ -80,7 +79,7 @@ func nodeStatusesToNodeInfos(nodes *serverpb.NodesResponse) []haProxyNodeInfo { fs.Var(aliasStrVar{&httpPort}, cliflags.ListenHTTPPort.Name, "" /* usage */) // Discard parsing output. 
- fs.SetOutput(ioutil.Discard) + fs.SetOutput(io.Discard) nodeInfos := make([]haProxyNodeInfo, 0, len(nodes.Nodes)) diff --git a/pkg/cli/prefixer.go b/pkg/cli/prefixer.go index 4f05369de50e..4e074fd7a46c 100644 --- a/pkg/cli/prefixer.go +++ b/pkg/cli/prefixer.go @@ -44,7 +44,6 @@ type filePrefixer struct { // // Use FilePrefixerOptions to override default token // delimiters and template. -// func newFilePrefixer(opts ...filePrefixerOption) filePrefixer { options := defaultFilePrefixerOptions for _, o := range opts { @@ -64,19 +63,20 @@ func newFilePrefixer(opts ...filePrefixerOption) filePrefixer { // template is provided, template defaults to "${host} > ". // // example file paths: -// testdata/merge_logs_v2/nodes/1/cockroach.test-0001.ubuntu.2018-11-30T22_06_47Z.003959.log -// testdata/merge_logs_v2/nodes/2/cockroach.test-0001.ubuntu.2018-11-30T22_06_47Z.003959.log -// testdata/merge_logs_v2/nodes/3/cockroach.test-0001.ubuntu.2018-11-30T22_06_47Z.003959.log +// +// testdata/merge_logs_v2/nodes/1/cockroach.test-0001.ubuntu.2018-11-30T22_06_47Z.003959.log +// testdata/merge_logs_v2/nodes/2/cockroach.test-0001.ubuntu.2018-11-30T22_06_47Z.003959.log +// testdata/merge_logs_v2/nodes/3/cockroach.test-0001.ubuntu.2018-11-30T22_06_47Z.003959.log // // prefix provided: (${fpath}) ${host}> // // produces: -// (1) test-0001> -// (2) test-0001> -// (3) test-0001> // -// See [debug_merge_logs_test.go, prefixer_test.go] for additional examples. +// (1) test-0001> +// (2) test-0001> +// (3) test-0001> // +// See [debug_merge_logs_test.go, prefixer_test.go] for additional examples. func (f filePrefixer) PopulatePrefixes(logFiles []fileInfo) { tPaths := make([][]string, len(logFiles)) diff --git a/pkg/cli/userfiletable_test.go b/pkg/cli/userfiletable_test.go index 297bf3c082d4..4a030bddd2c7 100644 --- a/pkg/cli/userfiletable_test.go +++ b/pkg/cli/userfiletable_test.go @@ -13,7 +13,7 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "net/url" "os" "path/filepath" @@ -788,7 +788,7 @@ func TestUsernameUserfileInteraction(t *testing.T) { url.User(username.RootUser)) defer cleanup() - conn := sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, rootURL.String()) + conn := sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, rootURL.String()) defer func() { if err := conn.Close(); err != nil { t.Fatal(err) diff --git a/pkg/cli/zip.go b/pkg/cli/zip.go index 393803fb6b91..871269ca3fc2 100644 --- a/pkg/cli/zip.go +++ b/pkg/cli/zip.go @@ -13,7 +13,7 @@ package cli import ( "context" "fmt" - "io/ioutil" + "io" "net" "os" "strings" @@ -357,7 +357,7 @@ func (zc *debugZipContext) dumpTableDataForZip( } // Pump the SQL rows directly into the zip writer, to avoid // in-RAM buffering. - return sqlExecCtx.RunQueryAndFormatResults(ctx, conn, w, ioutil.Discard, stderr, clisqlclient.MakeQuery(query)) + return sqlExecCtx.RunQueryAndFormatResults(ctx, conn, w, io.Discard, stderr, clisqlclient.MakeQuery(query)) }() if sqlErr != nil { if cErr := zc.z.createError(s, name, sqlErr); cErr != nil { diff --git a/pkg/cli/zip_helpers.go b/pkg/cli/zip_helpers.go index 004b8217cfdb..5a0d6dc917d6 100644 --- a/pkg/cli/zip_helpers.go +++ b/pkg/cli/zip_helpers.go @@ -501,9 +501,10 @@ func (z *zipReporter) result(err error) error { // timestampValue is a wrapper around time.Time which supports the // pflag.Value interface and can be initialized from a command line flag. 
// It recognizes the following input formats: -// YYYY-MM-DD -// YYYY-MM-DD HH:MM -// YYYY-MM-DD HH:MM:SS +// +// YYYY-MM-DD +// YYYY-MM-DD HH:MM +// YYYY-MM-DD HH:MM:SS type timestampValue time.Time // Type implements the pflag.Value interface. diff --git a/pkg/cli/zip_test.go b/pkg/cli/zip_test.go index 304a26722d50..f41deded47e6 100644 --- a/pkg/cli/zip_test.go +++ b/pkg/cli/zip_test.go @@ -16,7 +16,7 @@ import ( "context" enc_hex "encoding/hex" "fmt" - "io/ioutil" + "io" "net/url" "os" "path/filepath" @@ -493,7 +493,7 @@ func TestZipRetries(t *testing.T) { Host: s.ServingSQLAddr(), RawQuery: "sslmode=disable", } - sqlConn := sqlConnCtx.MakeSQLConn(ioutil.Discard, ioutil.Discard, sqlURL.String()) + sqlConn := sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, sqlURL.String()) defer func() { if err := sqlConn.Close(); err != nil { t.Fatal(err) diff --git a/pkg/cloud/cloud_io.go b/pkg/cloud/cloud_io.go index e0c021421c5f..e40970dec4c4 100644 --- a/pkg/cloud/cloud_io.go +++ b/pkg/cloud/cloud_io.go @@ -137,13 +137,14 @@ func DelayedRetry( // We can attempt to resume download if the error is ErrUnexpectedEOF. // In particular, we should not worry about a case when error is io.EOF. // The reason for this is two-fold: -// 1. The underlying http library converts io.EOF to io.ErrUnexpectedEOF -// if the number of bytes transferred is less than the number of -// bytes advertised in the Content-Length header. So if we see -// io.ErrUnexpectedEOF we can simply request the next range. -// 2. If the server did *not* advertise Content-Length, then -// there is really nothing we can do: http standard says that -// the stream ends when the server terminates connection. +// 1. The underlying http library converts io.EOF to io.ErrUnexpectedEOF +// if the number of bytes transferred is less than the number of +// bytes advertised in the Content-Length header. So if we see +// io.ErrUnexpectedEOF we can simply request the next range. +// 2. If the server did *not* advertise Content-Length, then +// there is really nothing we can do: http standard says that +// the stream ends when the server terminates connection. +// // In addition, we treat connection reset by peer errors (which can // happen if we didn't read from the connection too long due to e.g. load), // the same as unexpected eof errors. diff --git a/pkg/clusterversion/clusterversion.go b/pkg/clusterversion/clusterversion.go index a6c0ab4aa22b..929d63480ebe 100644 --- a/pkg/clusterversion/clusterversion.go +++ b/pkg/clusterversion/clusterversion.go @@ -20,23 +20,23 @@ // it. This package provides a way to do this safely with (hopefully) minimal // disruption. It works as follows: // -// - Each node in the cluster is running a binary that was released at some -// version ("binary version"). We allow for rolling upgrades, so two nodes in -// the cluster may be running different binary versions. All nodes in a given -// cluster must be within 1 major release of each other (i.e. to upgrade two -// major releases, the cluster must first be rolled onto X+1 and then to X+2). -// - Separate from the build versions of the binaries, the cluster itself has a -// logical "active cluster version", the version all the binaries are -// currently operating at. 
This is used for two related things: first as a -// promise from the user that they'll never downgrade any nodes in the cluster -// to a binary below some "minimum supported version", and second, to unlock -// features that are not backwards compatible (which is now safe given that -// the old binary will never be used). -// - Each binary can operate within a "range of supported versions". When a -// cluster is initialized, the binary doing the initialization uses the upper -// end of its supported range as the initial "active cluster version". Each -// node that joins this cluster then must be compatible with this cluster -// version. +// - Each node in the cluster is running a binary that was released at some +// version ("binary version"). We allow for rolling upgrades, so two nodes in +// the cluster may be running different binary versions. All nodes in a given +// cluster must be within 1 major release of each other (i.e. to upgrade two +// major releases, the cluster must first be rolled onto X+1 and then to X+2). +// - Separate from the build versions of the binaries, the cluster itself has a +// logical "active cluster version", the version all the binaries are +// currently operating at. This is used for two related things: first as a +// promise from the user that they'll never downgrade any nodes in the cluster +// to a binary below some "minimum supported version", and second, to unlock +// features that are not backwards compatible (which is now safe given that +// the old binary will never be used). +// - Each binary can operate within a "range of supported versions". When a +// cluster is initialized, the binary doing the initialization uses the upper +// end of its supported range as the initial "active cluster version". Each +// node that joins this cluster then must be compatible with this cluster +// version. package clusterversion import ( diff --git a/pkg/clusterversion/cockroach_versions.go b/pkg/clusterversion/cockroach_versions.go index a8b4440acf6d..0482d97b874a 100644 --- a/pkg/clusterversion/cockroach_versions.go +++ b/pkg/clusterversion/cockroach_versions.go @@ -19,97 +19,100 @@ type Key int // migrations. Before you add a version or consider removing one, please // familiarize yourself with the rules below. // -// Adding Versions +// # Adding Versions // // You'll want to add a new one in the following cases: // // (a) When introducing a backwards incompatible feature. Broadly, by this we -// mean code that's structured as follows: // -// if (specific-version is active) { -// // Implies that all nodes in the cluster are running binaries that -// // have this code. We can "enable" the new feature knowing that -// // outbound RPCs, requests, etc. will be handled by nodes that know -// // how to do so. -// } else { -// // There may be some nodes running older binaries without this code. -// // To be safe, we'll want to behave as we did before introducing -// // this feature. -// } +// mean code that's structured as follows: // -// Authors of migrations need to be careful in ensuring that end-users -// aren't able to enable feature gates before they're active. This is fine: +// if (specific-version is active) { +// // Implies that all nodes in the cluster are running binaries that +// // have this code. We can "enable" the new feature knowing that +// // outbound RPCs, requests, etc. will be handled by nodes that know +// // how to do so. +// } else { +// // There may be some nodes running older binaries without this code. 
+// // To be safe, we'll want to behave as we did before introducing +// // this feature. +// } // -// func handleSomeNewStatement() error { -// if !(specific-version is active) { -// return errors.New("cluster version needs to be bumped") -// } -// // ... -// } +// Authors of migrations need to be careful in ensuring that end-users +// aren't able to enable feature gates before they're active. This is fine: // -// At the same time, with requests/RPCs originating at other crdb nodes, the -// initiator of the request gets to decide what's supported. A node should -// not refuse functionality on the grounds that its view of the version gate -// is as yet inactive. Consider the sender: +// func handleSomeNewStatement() error { +// if !(specific-version is active) { +// return errors.New("cluster version needs to be bumped") +// } +// // ... +// } // -// func invokeSomeRPC(req) { -// if (specific-version is active) { -// // Like mentioned above, this implies that all nodes in the -// // cluster are running binaries that can handle this new -// // feature. We may have learned about this fact before the -// // node on the other end. This is due to the fact that migration -// // manager informs each node about the specific-version being -// // activated active concurrently. See BumpClusterVersion for -// // where that happens. Still, it's safe for us to enable the new -// // feature flags as we trust the recipient to know how to deal -// // with it. -// req.NewFeatureFlag = true -// } -// send(req) -// } +// At the same time, with requests/RPCs originating at other crdb nodes, the +// initiator of the request gets to decide what's supported. A node should +// not refuse functionality on the grounds that its view of the version gate +// is as yet inactive. Consider the sender: // -// And consider the recipient: +// func invokeSomeRPC(req) { +// if (specific-version is active) { +// // Like mentioned above, this implies that all nodes in the +// // cluster are running binaries that can handle this new +// // feature. We may have learned about this fact before the +// // node on the other end. This is due to the fact that migration +// // manager informs each node about the specific-version being +// // activated active concurrently. See BumpClusterVersion for +// // where that happens. Still, it's safe for us to enable the new +// // feature flags as we trust the recipient to know how to deal +// // with it. +// req.NewFeatureFlag = true +// } +// send(req) +// } // -// func someRPC(req) { -// if !req.NewFeatureFlag { -// // Legacy behavior... -// } -// // There's no need to even check if the specific-version is active. -// // If the flag is enabled, the specific-version must have been -// // activated, even if we haven't yet heard about it (we will pretty -// // soon). -// } +// And consider the recipient: // -// See clusterversion.Handle.IsActive and usage of some existing versions -// below for more clues on the matter. +// func someRPC(req) { +// if !req.NewFeatureFlag { +// // Legacy behavior... +// } +// // There's no need to even check if the specific-version is active. +// // If the flag is enabled, the specific-version must have been +// // activated, even if we haven't yet heard about it (we will pretty +// // soon). +// } +// +// See clusterversion.Handle.IsActive and usage of some existing versions +// below for more clues on the matter. // // (b) When cutting a major release branch. When cutting release-20.2 for -// example, you'll want to introduce the following to `master`. 
// -// (i) V20_2 (keyed to v20.2.0-0}) -// (ii) Start21_1 (keyed to v20.2.0-1}) +// example, you'll want to introduce the following to `master`. +// +// (i) V20_2 (keyed to v20.2.0-0}) +// (ii) Start21_1 (keyed to v20.2.0-1}) // -// You'll then want to backport (i) to the release branch itself (i.e. -// release-20.2). You'll also want to bump binaryMinSupportedVersion. In the -// example above, you'll set it to V20_2. This indicates that the -// minimum binary version required in a cluster with nodes running -// v21.1 binaries (including pre-release alphas) is v20.2, i.e. that an -// upgrade into such a binary must start out from at least v20.2 nodes. +// You'll then want to backport (i) to the release branch itself (i.e. +// release-20.2). You'll also want to bump binaryMinSupportedVersion. In the +// example above, you'll set it to V20_2. This indicates that the +// minimum binary version required in a cluster with nodes running +// v21.1 binaries (including pre-release alphas) is v20.2, i.e. that an +// upgrade into such a binary must start out from at least v20.2 nodes. // -// Aside: At the time of writing, the binary min supported version is the -// last major release, though we may consider relaxing this in the future -// (i.e. for example could skip up to one major release) as we move to a more -// frequent release schedule. +// Aside: At the time of writing, the binary min supported version is the +// last major release, though we may consider relaxing this in the future +// (i.e. for example could skip up to one major release) as we move to a more +// frequent release schedule. // // When introducing a version constant, you'll want to: -// (1) Add it at the end of this block. For versions introduced during and -// after the 21.1 release, Internal versions must be even-numbered. The -// odd versions are used for internal book-keeping. The Internal version -// should be the previous Internal version for the same minor release plus -// two. -// (2) Add it at the end of the `versionsSingleton` block below. // -// Migrations +// (1) Add it at the end of this block. For versions introduced during and +// after the 21.1 release, Internal versions must be even-numbered. The +// odd versions are used for internal book-keeping. The Internal version +// should be the previous Internal version for the same minor release plus +// two. +// (2) Add it at the end of the `versionsSingleton` block below. +// +// # Migrations // // Migrations are idempotent functions that can be attached to versions and will // be rolled out before the respective cluster version gets rolled out. They are @@ -119,7 +122,7 @@ type Key int // their own documentation in ./pkg/upgrade, which you should peruse should you // feel that a migration is necessary for your use case. // -// Phasing out Versions and Migrations +// # Phasing out Versions and Migrations // // Versions and Migrations can be removed once they are no longer going to be // exercised. This is primarily driven by the BinaryMinSupportedVersion, which diff --git a/pkg/cmd/bazci/main.go b/pkg/cmd/bazci/main.go index 31c6eafaf869..3b17dbfa6864 100644 --- a/pkg/cmd/bazci/main.go +++ b/pkg/cmd/bazci/main.go @@ -11,8 +11,8 @@ // bazci is glue code to make debugging Bazel builds and tests in Teamcity as // painless as possible. // -// bazci [build|test] \ -// --artifacts_dir=$ARTIFACTS_DIR targets... -- [command-line options] +// bazci [build|test] \ +// --artifacts_dir=$ARTIFACTS_DIR targets... 
-- [command-line options] // // bazci will invoke a `bazel build` or `bazel test` of all the given targets // and stage the resultant build/test artifacts in the given `artifacts_dir`. diff --git a/pkg/cmd/dev/datadriven_test.go b/pkg/cmd/dev/datadriven_test.go index eb63753b7e5e..49573f73dc6f 100644 --- a/pkg/cmd/dev/datadriven_test.go +++ b/pkg/cmd/dev/datadriven_test.go @@ -14,7 +14,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "log" "testing" @@ -39,8 +38,8 @@ const ( // DataDriven divvies up these files as subtests, so individual "files" are // runnable through: // -// dev test pkg/cmd/dev -f TestDataDriven/ [--rewrite] -// OR go test ./pkg/cmd/dev -run TestDataDriven/ [-rewrite] +// dev test pkg/cmd/dev -f TestDataDriven/ [--rewrite] +// OR go test ./pkg/cmd/dev -run TestDataDriven/ [-rewrite] // // NB: See commentary on TestRecorderDriven to see how they compare. // TestDataDriven is well suited for exercising flows that don't depend on @@ -66,7 +65,7 @@ func TestDataDriven(t *testing.T) { } if !verbose { // suppress all internal output unless told otherwise - execOpts = append(execOpts, exec.WithStdOutErr(ioutil.Discard, ioutil.Discard)) + execOpts = append(execOpts, exec.WithStdOutErr(io.Discard, io.Discard)) } devExec := exec.New(execOpts...) @@ -88,8 +87,8 @@ func TestDataDriven(t *testing.T) { dev.log = log.New(logger, "", 0) if !verbose { - dev.cli.SetErr(ioutil.Discard) - dev.cli.SetOut(ioutil.Discard) + dev.cli.SetErr(io.Discard) + dev.cli.SetOut(io.Discard) } require.Equalf(t, d.Cmd, "exec", "unknown command: %s", d.Cmd) diff --git a/pkg/cmd/dev/dev.go b/pkg/cmd/dev/dev.go index 813782eaed7b..bff23e415c78 100644 --- a/pkg/cmd/dev/dev.go +++ b/pkg/cmd/dev/dev.go @@ -11,7 +11,7 @@ package main import ( - "io/ioutil" + "io" "log" stdos "os" @@ -35,7 +35,7 @@ type dev struct { func makeDevCmd() *dev { var ret dev - ret.log = log.New(ioutil.Discard, "DEBUG: ", 0) // used for debug logging (see --debug) + ret.log = log.New(io.Discard, "DEBUG: ", 0) // used for debug logging (see --debug) ret.exec = exec.New(exec.WithLogger(ret.log)) ret.os = os.New(os.WithLogger(ret.log)) diff --git a/pkg/cmd/dev/recorderdriven_test.go b/pkg/cmd/dev/recorderdriven_test.go index 3dd5b70ae5de..20291b19ef76 100644 --- a/pkg/cmd/dev/recorderdriven_test.go +++ b/pkg/cmd/dev/recorderdriven_test.go @@ -15,7 +15,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" stdos "os" stdexec "os/exec" @@ -40,8 +39,8 @@ import ( // DataDriven divvies up these files as subtests, so individual "files" are // runnable through: // -// dev test pkg/cmd/dev -f TestRecorderDriven/ -// OR go test ./pkg/cmd/dev -run TestRecorderDriven/ +// dev test pkg/cmd/dev -f TestRecorderDriven/ +// OR go test ./pkg/cmd/dev -run TestRecorderDriven/ // // Recordings are used to mock out "system" behavior. When --rewrite is // specified, attempts to shell out to bazel or perform other OS operations @@ -49,7 +48,7 @@ import ( // responses are recorded for future playback. To update the test files with new // capture data, try: // -// go test ./pkg/cmd/dev -run TestRecorderDriven/ -rewrite +// go test ./pkg/cmd/dev -run TestRecorderDriven/ -rewrite // // NB: This test is worth contrasting to TestDataDriven, where all operations // are run in "dry-run" mode when --rewrite is specified. Here we'll actually @@ -69,7 +68,6 @@ import ( // bazel rules, we should re-evaluate whether this harness provides much value. // Probably dev commands that require writing a TestRecorderDriven test is worth // re-writing. 
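Most of the comment churn in this patch is the work of Go 1.19's gofmt, which now normalizes doc comments: indented command or code examples must be preceded by a blank // line and are re-indented with a tab, lists are reflowed as indented "  - " items, recognized section headings become "# Heading" lines, and directive comments such as //gcassert:inline are separated from the preceding doc text by a blank comment line. A short, purely illustrative file (the package and command names are invented, not taken from the patch) showing the layout gofmt now enforces:

	// Package example sketches the doc-comment layout produced by gofmt
	// from Go 1.19 onward.
	//
	// # Usage
	//
	// Command examples need a blank comment line above them and a tab indent:
	//
	//	example run --flag=value
	//
	// Bullet lists are reflowed as indented items:
	//   - first consideration
	//   - second consideration
	package example

This is presumably also why the patch bumps the toolchain to Go 1.19.1 elsewhere (see the golang:1.19 Dockerfile and the go_helpers.go download hunks below), so that the 1.19 formatter is the one the build uses.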
-// func TestRecorderDriven(t *testing.T) { rewriting := false if f := flag.Lookup("rewrite"); f != nil && f.Value.String() == "true" { @@ -98,7 +96,7 @@ func TestRecorderDriven(t *testing.T) { if !verbose { // Suppress all internal output unless told otherwise. - execOpts = append(execOpts, exec.WithStdOutErr(ioutil.Discard, ioutil.Discard)) + execOpts = append(execOpts, exec.WithStdOutErr(io.Discard, io.Discard)) } if rewriting { @@ -140,8 +138,8 @@ func TestRecorderDriven(t *testing.T) { dev.knobs.devBinOverride = "dev" if !verbose { - dev.cli.SetErr(ioutil.Discard) - dev.cli.SetOut(ioutil.Discard) + dev.cli.SetErr(io.Discard) + dev.cli.SetOut(io.Discard) } require.Equalf(t, d.Cmd, "dev", "unknown command: %s", d.Cmd) diff --git a/pkg/cmd/dev/ui.go b/pkg/cmd/dev/ui.go index 4d476011121f..dfc14533c36f 100644 --- a/pkg/cmd/dev/ui.go +++ b/pkg/cmd/dev/ui.go @@ -605,8 +605,8 @@ launching test in a real browser. Extra flags are passed directly to the // buildBazelYarnArgv returns the provided argv formatted so it can be run with // the bazel-provided version of yarn via `d.exec.CommandContextWithEnv`, e.g.: // -// argv := buildBazelYarnArgv("--cwd", "/path/to/dir", "run", "some-target") -// d.exec.CommandContextWithEnv(ctx, env, "bazel", argv) +// argv := buildBazelYarnArgv("--cwd", "/path/to/dir", "run", "some-target") +// d.exec.CommandContextWithEnv(ctx, env, "bazel", argv) func buildBazelYarnArgv(argv ...string) []string { return append([]string{ "run", "@yarn//:yarn", "--", diff --git a/pkg/cmd/docgen/diagrams.go b/pkg/cmd/docgen/diagrams.go index 82b42685be79..7aaf77389b2e 100644 --- a/pkg/cmd/docgen/diagrams.go +++ b/pkg/cmd/docgen/diagrams.go @@ -591,16 +591,16 @@ var specs = []stmtSpec{ }, }, { - name: "check_column_level", - stmt: "stmt_block", + name: "check_column_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'CHECK' '(' check_expr ')' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"}, - unlink: []string{"table_name", "column_name", "column_type", "check_expr", "column_constraints", "table_constraints"}, + unlink: []string{"table_name", "column_name", "column_type", "check_expr", "column_constraints", "table_constraints"}, }, { - name: "check_table_level", - stmt: "stmt_block", + name: "check_table_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' constraint_name | ) 'CHECK' '(' check_expr ')' ( table_constraints | ) ')'"}, - unlink: []string{"table_name", "check_expr", "table_constraints"}, + unlink: []string{"table_name", "check_expr", "table_constraints"}, }, { name: "column_table_def", @@ -944,16 +944,16 @@ var specs = []stmtSpec{ inline: []string{"privileges", "opt_privileges_clause", "opt_with_grant_option"}, }, { - name: "foreign_key_column_level", - stmt: "stmt_block", + name: "foreign_key_column_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'REFERENCES' parent_table ( '(' ref_column_name ')' | ) ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"}, - unlink: []string{"table_name", "column_name", "column_type", "parent_table", "table_constraints"}, + unlink: []string{"table_name", "column_name", "column_type", "parent_table", "table_constraints"}, }, { - name: 
"foreign_key_table_level", - stmt: "stmt_block", + name: "foreign_key_table_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' constraint_name | ) 'FOREIGN KEY' '(' ( fk_column_name ( ',' fk_column_name )* ) ')' 'REFERENCES' parent_table ( '(' ( ref_column_name ( ',' ref_column_name )* ) ')' | ) ( table_constraints | ) ')'"}, - unlink: []string{"table_name", "column_name", "parent_table", "table_constraints"}, + unlink: []string{"table_name", "column_name", "parent_table", "table_constraints"}, }, { name: "index_def", @@ -1030,10 +1030,10 @@ var specs = []stmtSpec{ }, {name: "iso_level"}, { - name: "not_null_column_level", - stmt: "stmt_block", + name: "not_null_column_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'NOT NULL' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"}, - unlink: []string{"table_name", "column_name", "column_type", "table_constraints"}, + unlink: []string{"table_name", "column_name", "column_type", "table_constraints"}, }, { name: "opt_with_storage_parameter_list", @@ -1052,16 +1052,16 @@ var specs = []stmtSpec{ unlink: []string{"schedule_id"}, }, { - name: "primary_key_column_level", - stmt: "stmt_block", + name: "primary_key_column_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'PRIMARY KEY' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"}, - unlink: []string{"table_name", "column_name", "column_type", "table_constraints"}, + unlink: []string{"table_name", "column_name", "column_type", "table_constraints"}, }, { - name: "primary_key_table_level", - stmt: "stmt_block", + name: "primary_key_table_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' name | ) 'PRIMARY KEY' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'"}, - unlink: []string{"table_name", "column_name", "table_constraints"}, + unlink: []string{"table_name", "column_name", "table_constraints"}, }, { name: "refresh_materialized_views", @@ -1463,16 +1463,16 @@ var specs = []stmtSpec{ unlink: []string{"table_name"}, }, { - name: "unique_column_level", - stmt: "stmt_block", + name: "unique_column_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'UNIQUE' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"}, - unlink: []string{"table_name", "column_name", "column_type", "table_constraints"}, + unlink: []string{"table_name", "column_name", "column_type", "table_constraints"}, }, { - name: "unique_table_level", - stmt: "stmt_block", + name: "unique_table_level", + stmt: "stmt_block", replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' name | ) 'UNIQUE' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'"}, - unlink: []string{"table_name", "check_expr", "table_constraints"}, + unlink: []string{"table_name", "check_expr", "table_constraints"}, }, { name: "unsplit_index_at", diff --git a/pkg/cmd/fuzz/main.go b/pkg/cmd/fuzz/main.go index 
b102c56dcbed..f8196203b2fe 100644 --- a/pkg/cmd/fuzz/main.go +++ b/pkg/cmd/fuzz/main.go @@ -11,13 +11,16 @@ // fuzz builds and executes fuzz tests. // // Fuzz tests can be added to CockroachDB by adding a function of the form: -// func FuzzXXX(data []byte) int +// +// func FuzzXXX(data []byte) int +// // To help the fuzzer increase coverage, this function should return 1 on // interesting input (for example, a parse succeeded) and 0 otherwise. Panics // will be detected and reported. // // To exclude this file except during fuzzing, tag it with: -// // +build gofuzz +// +// // +build gofuzz package main import ( diff --git a/pkg/cmd/generate-bazel-extra/main.go b/pkg/cmd/generate-bazel-extra/main.go index 8aa87cce2b94..34f6e773084c 100644 --- a/pkg/cmd/generate-bazel-extra/main.go +++ b/pkg/cmd/generate-bazel-extra/main.go @@ -15,7 +15,6 @@ import ( "errors" "flag" "fmt" - "io/ioutil" "log" "os" "os/exec" @@ -75,7 +74,7 @@ func getTestTargets(testTargetSize string) ([]string, error) { func generateTestSuites() { // First list all test and binary targets. - infos, err := ioutil.ReadDir("pkg") + infos, err := os.ReadDir("pkg") if err != nil { panic(err) } diff --git a/pkg/cmd/generate-metadata-tables/main.go b/pkg/cmd/generate-metadata-tables/main.go index d8e760472fd9..f3defe53f469 100644 --- a/pkg/cmd/generate-metadata-tables/main.go +++ b/pkg/cmd/generate-metadata-tables/main.go @@ -20,7 +20,8 @@ // --catalog: can be pg_catalog or information_schema. Default is pg_catalog // --rdbms: can be postgres or mysql. Default is postgres // --stdout: for testing purposes, use this flag to send the output to the -// console +// +// console // // Output of this file should generate (If not using --stout): // pkg/sql/testdata/_tables_from_.json diff --git a/pkg/cmd/gossipsim/main.go b/pkg/cmd/gossipsim/main.go index b45451825bd2..df4ae39de7fe 100644 --- a/pkg/cmd/gossipsim/main.go +++ b/pkg/cmd/gossipsim/main.go @@ -12,7 +12,7 @@ Package simulation provides tools meant to visualize or test aspects of a Cockroach cluster on a single host. -Gossip +# Gossip Gossip creates a gossip network of up to 250 nodes and outputs successive visualization of the gossip network graph via dot. @@ -23,8 +23,8 @@ simulation. To run: - go install github.com/cockroachdb/cockroach/cmd/gossipsim - gossipsim -size=(small|medium|large|huge|ginormous) + go install github.com/cockroachdb/cockroach/cmd/gossipsim + gossipsim -size=(small|medium|large|huge|ginormous) Log output includes instructions for displaying the graph output as a series of images to visualize the evolution of the network. @@ -33,23 +33,23 @@ Running the large through ginormous simulations will require the open files limit be increased either for the shell running the simulation, or system wide. For Linux: - # For the current shell: - ulimit -n 65536 + # For the current shell: + ulimit -n 65536 - # System-wide: - sysctl fs.file-max - fs.file-max = 50384 + # System-wide: + sysctl fs.file-max + fs.file-max = 50384 For MacOS: - # To view current limits (soft / hard): - launchctl limit maxfiles + # To view current limits (soft / hard): + launchctl limit maxfiles - # To edit, add/edit the following line in /etc/launchd.conf and - # restart for the new file limit to take effect. - # - # limit maxfiles 16384 32768 - sudo vi /etc/launchd.conf + # To edit, add/edit the following line in /etc/launchd.conf and + # restart for the new file limit to take effect. 
+ # + # limit maxfiles 16384 32768 + sudo vi /etc/launchd.conf */ package main @@ -117,23 +117,23 @@ func (em edgeMap) addEdge(nodeID roachpb.NodeID, e edge) { // // The format of the output looks like this: // -// digraph G { -// node [shape=record]; -// node1 [fontsize=12,label="{Node 1|MH=3}"] -// node1 -> node3 [color=green] -// node1 -> node4 -// node1 -> node5 [color=red,style=dotted] -// node2 [fontsize=24,label="{Node 2|MH=2}"] -// node2 -> node5 -// node3 [fontsize=18,label="{Node 3|MH=5}"] -// node3 -> node5 -// node3 -> node4 -// node4 [fontsize=24,label="{Node 4|MH=4}"] -// node4 -> node2 -// node5 [fontsize=24,label="{Node 5|MH=1}"] -// node5 -> node2 -// node5 -> node3 -// } +// digraph G { +// node [shape=record]; +// node1 [fontsize=12,label="{Node 1|MH=3}"] +// node1 -> node3 [color=green] +// node1 -> node4 +// node1 -> node5 [color=red,style=dotted] +// node2 [fontsize=24,label="{Node 2|MH=2}"] +// node2 -> node5 +// node3 [fontsize=18,label="{Node 3|MH=5}"] +// node3 -> node5 +// node3 -> node4 +// node4 [fontsize=24,label="{Node 4|MH=4}"] +// node4 -> node2 +// node5 [fontsize=24,label="{Node 5|MH=1}"] +// node5 -> node2 +// node5 -> node3 +// } // // Returns the name of the output file and a boolean for whether or not // the network has quiesced (that is, no new edges, and all nodes are diff --git a/pkg/cmd/internal/issues/issues_test.go b/pkg/cmd/internal/issues/issues_test.go index f99bf597fb42..644293fa5948 100644 --- a/pkg/cmd/internal/issues/issues_test.go +++ b/pkg/cmd/internal/issues/issues_test.go @@ -64,8 +64,8 @@ func TestPost(t *testing.T) { name: "failure", packageName: "github.com/cockroachdb/cockroach/pkg/storage", testName: "TestReplicateQueueRebalance", - message: " :12: storage/replicate_queue_test.go:103, condition failed to evaluate within 45s: not balanced: [10 1 10 1 8]", - reproCmd: "make stressrace TESTS=TestReplicateQueueRebalance PKG=./pkg/storage TESTTIMEOUT=5m STRESSFLAGS='-timeout 5m' 2>&1", + message: " :12: storage/replicate_queue_test.go:103, condition failed to evaluate within 45s: not balanced: [10 1 10 1 8]", + reproCmd: "make stressrace TESTS=TestReplicateQueueRebalance PKG=./pkg/storage TESTTIMEOUT=5m STRESSFLAGS='-timeout 5m' 2>&1", }, { name: "fatal", diff --git a/pkg/cmd/prereqs/prereqs.go b/pkg/cmd/prereqs/prereqs.go index 4c834576af9f..7403c40c74d4 100644 --- a/pkg/cmd/prereqs/prereqs.go +++ b/pkg/cmd/prereqs/prereqs.go @@ -15,22 +15,22 @@ // dependency graph to determine what files impact its compilation. It then // outputs a Makefile that expresses these dependencies. For example: // -// $ prereqs ./pkg/cmd/foo -// # Code generated by prereqs. DO NOT EDIT! +// $ prereqs ./pkg/cmd/foo +// # Code generated by prereqs. DO NOT EDIT! // -// bin/foo: ./pkg/cmd/foo/foo.go ./some/dep.go ./some/other_dep.go +// bin/foo: ./pkg/cmd/foo/foo.go ./some/dep.go ./some/other_dep.go // -// ./pkg/cmd/foo/foo.go: -// ./some/dep.go: -// ./some/other_dep.go: +// ./pkg/cmd/foo/foo.go: +// ./some/dep.go: +// ./some/other_dep.go: // // The intended usage is automatic dependency generation from another Makefile: // -// bin/target: -// prereqs ./pkg/cmd/target > bin/target.d -// go build -o $@ ./pkg/cmd/target +// bin/target: +// prereqs ./pkg/cmd/target > bin/target.d +// go build -o $@ ./pkg/cmd/target // -// include bin/target.d +// include bin/target.d // // Notice that depended-upon files are mentioned not only in the prerequisites // list but also as a rule with no prerequisite or recipe. 
This prevents Make diff --git a/pkg/cmd/prereqs/prereqs_test.go b/pkg/cmd/prereqs/prereqs_test.go index a539976333de..286f87fa0855 100644 --- a/pkg/cmd/prereqs/prereqs_test.go +++ b/pkg/cmd/prereqs/prereqs_test.go @@ -21,14 +21,13 @@ import ( var expectedA = `# Code generated by prereqs. DO NOT EDIT! -bin/a: a a/a.c a/a.f a/a.go a/cgo.go a/ignore.go a/invalid.go b b/b.go b/vendor/foo.com/bar b/vendor/foo.com/bar/bar.go vendor/foo.com/foo vendor/foo.com/foo/foo.go +bin/a: a a/a.c a/a.f a/a.go a/cgo.go a/invalid.go b b/b.go b/vendor/foo.com/bar b/vendor/foo.com/bar/bar.go vendor/foo.com/foo vendor/foo.com/foo/foo.go a: a/a.c: a/a.f: a/a.go: a/cgo.go: -a/ignore.go: a/invalid.go: b: b/b.go: diff --git a/pkg/cmd/prereqs/testdata/src/example.com/a/ignore.go b/pkg/cmd/prereqs/testdata/src/example.com/a/ignore.go index e1c18fc2d42d..928e2d30a73e 100644 --- a/pkg/cmd/prereqs/testdata/src/example.com/a/ignore.go +++ b/pkg/cmd/prereqs/testdata/src/example.com/a/ignore.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package a - //go:build notme // +build notme + +package a diff --git a/pkg/cmd/publish-provisional-artifacts/main_test.go b/pkg/cmd/publish-provisional-artifacts/main_test.go index 5cd5b80b66f3..42e6af12c4d8 100644 --- a/pkg/cmd/publish-provisional-artifacts/main_test.go +++ b/pkg/cmd/publish-provisional-artifacts/main_test.go @@ -14,7 +14,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -48,7 +47,7 @@ func (s *mockStorage) GetObject(i *release.GetObjectInput) (*release.GetObjectOu url := fmt.Sprintf(`s3://%s/%s`, s.Bucket(), *i.Key) s.gets = append(s.gets, url) o := &release.GetObjectOutput{ - Body: ioutil.NopCloser(bytes.NewBufferString(url)), + Body: io.NopCloser(bytes.NewBufferString(url)), } return o, nil } diff --git a/pkg/cmd/release/github.go b/pkg/cmd/release/github.go index 7bc790182a26..5ac5ef874ec1 100644 --- a/pkg/cmd/release/github.go +++ b/pkg/cmd/release/github.go @@ -135,7 +135,8 @@ func (c *githubClientImpl) branchExists(branchName string) (bool, error) { } // issueEvents returns events in chronological order, e.g. -// https://api.github.com/repos/cockroachdb/cockroach/issues/77157/timeline +// +// https://api.github.com/repos/cockroachdb/cockroach/issues/77157/timeline func (c *githubClientImpl) issueEvents(issueNum int) ([]githubEvent, error) { var details []githubEvent // TODO: This pagination pattern is a potential race condition: we may want to move to graphql api, diff --git a/pkg/cmd/release/jira.go b/pkg/cmd/release/jira.go index 40252fdfbf54..16b6ec4dca00 100644 --- a/pkg/cmd/release/jira.go +++ b/pkg/cmd/release/jira.go @@ -190,7 +190,9 @@ func createTrackingIssue( // - https://cockroachlabs.atlassian.net/browse/SREOPS-4037 // - https://cockroachlabs.atlassian.net/rest/api/2/issue/SREOPS-4037 // TODO(celia): [Future "week 0" work] We'll eventually want the ability to specify -// a qualification partition & friendly ID: +// +// a qualification partition & friendly ID: +// // During the stability period, release managers may be qualifying multiple candidates // at the same time. 
If that's the case, release managers will want the ability to // explicitly specify which partition to use, so that we don't "overwrite" the diff --git a/pkg/cmd/roachprod/docker/Dockerfile b/pkg/cmd/roachprod/docker/Dockerfile index 197e92dbc337..88cb3ad5ef0d 100644 --- a/pkg/cmd/roachprod/docker/Dockerfile +++ b/pkg/cmd/roachprod/docker/Dockerfile @@ -12,7 +12,7 @@ RUN bazel build --config=crosslinux //pkg/cmd/roachprod:roachprod # Copy the roachprod binary to a stable location RUN cp $(bazel info bazel-bin --config=crosslinux)/pkg/cmd/roachprod/roachprod_/roachprod ./ -FROM golang:1.18 +FROM golang:1.19 COPY entrypoint.sh build.sh /build/ RUN ["/build/build.sh"] COPY --from=builder /build/roachprod /usr/local/bin/roachprod diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go index 3494afd1b8b2..3ed2f18a2271 100644 --- a/pkg/cmd/roachtest/cluster.go +++ b/pkg/cmd/roachtest/cluster.go @@ -1349,7 +1349,7 @@ WHERE t.status NOT IN ('RANGE_CONSISTENT', 'RANGE_INDETERMINATE')`) } // FailOnReplicaDivergence fails the test if -// crdb_internal.check_consistency(true, '', '') indicates that any ranges' +// crdb_internal.check_consistency(true, ”, ”) indicates that any ranges' // replicas are inconsistent with each other. It uses the first node that // is up to run the query. func (c *clusterImpl) FailOnReplicaDivergence(ctx context.Context, t *testImpl) { diff --git a/pkg/cmd/roachtest/clusterstats/collector.go b/pkg/cmd/roachtest/clusterstats/collector.go index 1cb1f5113667..79bd1fcb717c 100644 --- a/pkg/cmd/roachtest/clusterstats/collector.go +++ b/pkg/cmd/roachtest/clusterstats/collector.go @@ -84,16 +84,17 @@ func (cs *clusterStatCollector) Exporter() StatExporter { // tagged values. For example, if the query were rebalancing_queriespersecond // at time 110 and there were two stores (1,2) with ip addresses 10.0.0.1 and // 127.0.0.1, for store 2 and store 1 respectively. -// { -// "store": { -// "1": {Time: 100, Value: 777}, -// "2": {Time: 100, Value: 42}, -// }, -// "instance": { -// "10.0.0.1": {Time: 100, Value: 42}, -// "127.0.0.1": {Time: 100, Value: 777}, -// }, -// } +// +// { +// "store": { +// "1": {Time: 100, Value: 777}, +// "2": {Time: 100, Value: 42}, +// }, +// "instance": { +// "10.0.0.1": {Time: 100, Value: 42}, +// "127.0.0.1": {Time: 100, Value: 777}, +// }, +// } func (cs *clusterStatCollector) CollectPoint( ctx context.Context, l *logger.Logger, at time.Time, q string, ) (map[string]map[string]StatPoint, error) { @@ -133,28 +134,29 @@ func (cs *clusterStatCollector) CollectPoint( // were rebalancing_queriespersecond in the interval [100,110] and there were // two stores (1,2) with ip addresses 10.0.0.1 and 127.0.0.1, for store 2 and // store 1 respectively. 
-// { -// "store": { -// "1": { -// {Time: 100, Value: 777}, -// {Time: 110, Value: 888} -// }, -// "2": { -// {Time: 100, Value: 42}, -// {Time: 110, Value 42}, -// }, -// }, -// "instance": { -// "10.0.0.1": { -// {Time: 100, Value: 42}, -// {Time: 110, Value 42}, -// }, -// "127.0.0.1": { -// {Time: 100, Value: 777}, -// {Time: 110, Value: 888} -// }, -// }, -// } +// +// { +// "store": { +// "1": { +// {Time: 100, Value: 777}, +// {Time: 110, Value: 888} +// }, +// "2": { +// {Time: 100, Value: 42}, +// {Time: 110, Value 42}, +// }, +// }, +// "instance": { +// "10.0.0.1": { +// {Time: 100, Value: 42}, +// {Time: 110, Value 42}, +// }, +// "127.0.0.1": { +// {Time: 100, Value: 777}, +// {Time: 110, Value: 888} +// }, +// }, +// } func (cs *clusterStatCollector) CollectInterval( ctx context.Context, l *logger.Logger, interval Interval, q string, ) (map[string]map[string]StatSeries, error) { diff --git a/pkg/cmd/roachtest/clusterstats/exporter.go b/pkg/cmd/roachtest/clusterstats/exporter.go index 21b640627ec8..15fb7be1ba97 100644 --- a/pkg/cmd/roachtest/clusterstats/exporter.go +++ b/pkg/cmd/roachtest/clusterstats/exporter.go @@ -24,7 +24,9 @@ import ( ) // ClusterStat represents a filtered query by the given LabelName. For example, -// ClusterStat{Query: "rebalancing_queriespersecond", LabelName: "store"} +// +// ClusterStat{Query: "rebalancing_queriespersecond", LabelName: "store"} +// // would collect a QPS stat per store in the cluster. type ClusterStat struct { Query string @@ -201,14 +203,15 @@ func (cs *clusterStatCollector) collectSummaries( // time series into a single one, for the same metric. This can either be a // query (2a) or aggregating function (2b), depending on whether the function // is supported by prometheus. -// (2a) AggQuery.Query declares a prometheus query to be used over the given -// interval. For example, AggQuery.Query = "sum(rebalancing_queriespersecond)" -// would return StatSummary.Value = {600, 600, 600}. -// (2b) AggQuery.AggFn is a substitute for 2a, it aggregates over a collection -// of labeled time series, returning a single time series. For example, -// AggQuery.AggFn = func(...) {return max(...)} would return -// StatSummary.Value{300, 300, 300}. It must also return an AggregateTag to -// identify the resulting timeseries. +// +// (2a) AggQuery.Query declares a prometheus query to be used over the given +// interval. For example, AggQuery.Query = "sum(rebalancing_queriespersecond)" +// would return StatSummary.Value = {600, 600, 600}. +// (2b) AggQuery.AggFn is a substitute for 2a, it aggregates over a collection +// of labeled time series, returning a single time series. For example, +// AggQuery.AggFn = func(...) {return max(...)} would return +// StatSummary.Value{300, 300, 300}. It must also return an AggregateTag to +// identify the resulting timeseries. func (cs *clusterStatCollector) getStatSummary( ctx context.Context, l *logger.Logger, summaryQuery AggQuery, ) (StatSummary, error) { diff --git a/pkg/cmd/roachtest/clusterstats/streamer.go b/pkg/cmd/roachtest/clusterstats/streamer.go index e73f3c0497b7..3f3c6e6d4345 100644 --- a/pkg/cmd/roachtest/clusterstats/streamer.go +++ b/pkg/cmd/roachtest/clusterstats/streamer.go @@ -95,8 +95,9 @@ type StatEvent struct { // the query time is increased bt the collecter interval. The results are // passed the provided processTickFn. This function will run indefinitely, // until either: -// (1) the context cancellation -// (2) processTickFn returns true, indicating that it has finished. 
+// +// (1) the context cancellation +// (2) processTickFn returns true, indicating that it has finished. func (css *clusterStatStreamer) Run( ctx context.Context, l *logger.Logger, startTime time.Time, ) error { diff --git a/pkg/cmd/roachtest/test_impl.go b/pkg/cmd/roachtest/test_impl.go index 543c87a666e0..d3725a639a54 100644 --- a/pkg/cmd/roachtest/test_impl.go +++ b/pkg/cmd/roachtest/test_impl.go @@ -332,8 +332,9 @@ func (t *testImpl) failWithMsg(msg string) { // Args: // skip: The number of stack frames to exclude from the result. 0 means that -// the caller will be the first frame identified. 1 means the caller's caller -// will be the first, etc. +// +// the caller will be the first frame identified. 1 means the caller's caller +// will be the first, etc. func (t *testImpl) decorate(skip int, s string) string { // Skip two extra frames to account for this function and runtime.Callers // itself. diff --git a/pkg/cmd/roachtest/test_runner.go b/pkg/cmd/roachtest/test_runner.go index 9895e1f4dd8a..4d33db2ad61b 100644 --- a/pkg/cmd/roachtest/test_runner.go +++ b/pkg/cmd/roachtest/test_runner.go @@ -104,8 +104,10 @@ type testRunner struct { // newTestRunner constructs a testRunner. // // cr: The cluster registry with which all clusters will be registered. The -// caller provides this as the caller needs to be able to shut clusters down -// on Ctrl+C. +// +// caller provides this as the caller needs to be able to shut clusters down +// on Ctrl+C. +// // buildVersion: The version of the Cockroach binary against which tests will run. func newTestRunner( cr *clusterRegistry, stopper *stop.Stopper, buildVersion version.Version, @@ -169,9 +171,11 @@ type testOpts struct { // tests: The tests to run. // count: How many times to run each test selected by filter. // parallelism: How many workers to use for running tests. Tests are run -// locally (although generally they run against remote roachprod clusters). -// parallelism bounds the maximum number of tests that run concurrently. Note -// that the concurrency is also affected by cpuQuota. +// +// locally (although generally they run against remote roachprod clusters). +// parallelism bounds the maximum number of tests that run concurrently. Note +// that the concurrency is also affected by cpuQuota. +// // clusterOpt: Options for the clusters to use by tests. // lopt: Options for logging. func (r *testRunner) Run( @@ -419,11 +423,17 @@ type clusterAllocatorFn func( // Args: // name: The worker's name, to be used as a prefix for log messages. // artifactsRootDir: The artifacts dir. Each test's logs are going to be under a -// run_ dir. If empty, test log files will not be created. +// +// run_ dir. If empty, test log files will not be created. +// // literalArtifactsDir: The literal on-agent path where artifacts are stored. -// Only used for teamcity[publishArtifacts] messages. +// +// Only used for teamcity[publishArtifacts] messages. +// // stdout: The Writer to use for messages that need to go to stdout (e.g. the -// "=== RUN" and "--- FAIL" lines). +// +// "=== RUN" and "--- FAIL" lines). +// // teeOpt: The teeing option for future test loggers. // l: The logger to use for more verbose messages. func (r *testRunner) runWorker( @@ -735,7 +745,8 @@ func allStacks() []byte { // // Args: // c: The cluster on which the test will run. runTest() does not wipe or destroy -// the cluster. +// +// the cluster. 
func (r *testRunner) runTest( ctx context.Context, t *testImpl, @@ -1191,7 +1202,6 @@ type getWorkCallbacks struct { // getWork takes in a cluster; if not nil, tests that can reuse it are // preferred. If a test that can reuse it is not found (or if there's no more // work), the cluster is destroyed (and so its resources are released). -// func (r *testRunner) getWork( ctx context.Context, work *workPool, @@ -1243,7 +1253,8 @@ func (r *testRunner) removeWorker(ctx context.Context, name string) { // runHTTPServer starts a server running in the background. // // httpPort: The port on which to serve the web interface. Pass 0 for allocating -// a port automatically (which will be printed to stdout). +// +// a port automatically (which will be printed to stdout). func (r *testRunner) runHTTPServer(httpPort int, stdout io.Writer) error { http.HandleFunc("/", r.serveHTTP) // Run an http server in the background. diff --git a/pkg/cmd/roachtest/test_test.go b/pkg/cmd/roachtest/test_test.go index 91c9771dee96..6fbbe4da3f32 100644 --- a/pkg/cmd/roachtest/test_test.go +++ b/pkg/cmd/roachtest/test_test.go @@ -13,7 +13,7 @@ package main import ( "bytes" "context" - "io/ioutil" + "io" "math/rand" "regexp" "sort" @@ -82,8 +82,8 @@ func TestMatchOrSkip(t *testing.T) { func nilLogger() *logger.Logger { lcfg := logger.Config{ - Stdout: ioutil.Discard, - Stderr: ioutil.Discard, + Stdout: io.Discard, + Stderr: io.Discard, } l, err := lcfg.NewLogger("" /* path */) if err != nil { @@ -434,8 +434,8 @@ func runExitCodeTest(t *testing.T, injectedError error) error { lopt := loggingOpt{ l: nilLogger(), tee: logger.NoTee, - stdout: ioutil.Discard, - stderr: ioutil.Discard, + stdout: io.Discard, + stderr: io.Discard, artifactsDir: "", } return runner.Run(ctx, tests, 1, 1, clustersOpt{}, testOpts{}, lopt, nil /* clusterAllocator */) diff --git a/pkg/cmd/roachtest/tests/follower_reads.go b/pkg/cmd/roachtest/tests/follower_reads.go index 87a7b262cb29..af4bb9a1fd2f 100644 --- a/pkg/cmd/roachtest/tests/follower_reads.go +++ b/pkg/cmd/roachtest/tests/follower_reads.go @@ -141,24 +141,23 @@ type topologySpec struct { // runFollowerReadsTest is a basic litmus test that follower reads work. // The test does the following: // -// * Creates a multi-region database and table. -// * Configures the database's survival goals. -// * Configures the table's locality setting. -// * Installs a number of rows into that table. -// * Queries the data initially with a recent timestamp and expecting an -// error because the table does not exist in the past immediately following -// creation. -// * If using a REGIONAL table, waits until the required duration has elapsed -// such that the installed data can be read with a follower read issued using -// `follower_read_timestamp()`. -// * Performs a few select query against a single row on all of the nodes and -// then observes the counter metric for store-level follower reads ensuring -// that they occurred on at least two of the nodes. If using a REGIONAL table, -// these reads are stale through the use of `follower_read_timestamp()`. -// * Performs reads against the written data on all of the nodes at a steady -// rate for 20 seconds, ensure that the 90-%ile SQL latencies during that -// time are under 10ms which implies that no WAN RPCs occurred. -// +// - Creates a multi-region database and table. +// - Configures the database's survival goals. +// - Configures the table's locality setting. +// - Installs a number of rows into that table. 
+// - Queries the data initially with a recent timestamp and expecting an +// error because the table does not exist in the past immediately following +// creation. +// - If using a REGIONAL table, waits until the required duration has elapsed +// such that the installed data can be read with a follower read issued using +// `follower_read_timestamp()`. +// - Performs a few select query against a single row on all of the nodes and +// then observes the counter metric for store-level follower reads ensuring +// that they occurred on at least two of the nodes. If using a REGIONAL table, +// these reads are stale through the use of `follower_read_timestamp()`. +// - Performs reads against the written data on all of the nodes at a steady +// rate for 20 seconds, ensure that the 90-%ile SQL latencies during that +// time are under 10ms which implies that no WAN RPCs occurred. func runFollowerReadsTest( ctx context.Context, t test.Test, diff --git a/pkg/cmd/roachtest/tests/go_helpers.go b/pkg/cmd/roachtest/tests/go_helpers.go index ad31acbea038..f45b2a85c8fa 100644 --- a/pkg/cmd/roachtest/tests/go_helpers.go +++ b/pkg/cmd/roachtest/tests/go_helpers.go @@ -43,13 +43,13 @@ func installGolang( } if err := repeatRunE( - ctx, t, c, node, "download go", `curl -fsSL https://dl.google.com/go/go1.18.4.linux-amd64.tar.gz > /tmp/go.tgz`, + ctx, t, c, node, "download go", `curl -fsSL https://dl.google.com/go/go1.19.1.linux-amd64.tar.gz > /tmp/go.tgz`, ); err != nil { t.Fatal(err) } if err := repeatRunE( ctx, t, c, node, "verify tarball", `sha256sum -c - <test.*::.*::[^ \[\]]*( var sqlAlchemyReleaseTagRegex = regexp.MustCompile(`^rel_(?P\d+)_(?P\d+)_(?P\d+)$`) // TODO(arul): Investigate why we need this and can't install sql alchemy using -// pip. +// +// pip. var supportedSQLAlchemyTag = "rel_1_4_26" // This test runs the SQLAlchemy dialect test suite against a single Cockroach diff --git a/pkg/cmd/roachtest/work_pool.go b/pkg/cmd/roachtest/work_pool.go index 525991750024..ac85e15f6ae6 100644 --- a/pkg/cmd/roachtest/work_pool.go +++ b/pkg/cmd/roachtest/work_pool.go @@ -118,13 +118,13 @@ func (p *workPool) getTestToRun( // selectTestForCluster selects a test to run on a cluster with a given spec. // // Among tests that match the spec, we do the following: -// - If the cluster is already tagged, we only look at tests with the same tag. -// - Otherwise, we'll choose in the following order of preference: -// 1) tests that leave the cluster usable by anybody afterwards -// 2) tests that leave the cluster usable by some other tests -// 2.1) within this OnlyTagged category, we'll prefer the tag with the -// fewest existing clusters. -// 3) tests that leave the cluster unusable by anybody +// - If the cluster is already tagged, we only look at tests with the same tag. +// - Otherwise, we'll choose in the following order of preference: +// 1) tests that leave the cluster usable by anybody afterwards +// 2) tests that leave the cluster usable by some other tests +// 2.1) within this OnlyTagged category, we'll prefer the tag with the +// fewest existing clusters. +// 3) tests that leave the cluster unusable by anybody // // Within each of the categories, we'll give preference to tests with fewer // runs. @@ -175,7 +175,6 @@ func (p *workPool) selectTestForCluster( // allocate. 
// // ensures: !testToRunRes.noWork || error == nil -// func (p *workPool) selectTest(ctx context.Context, qp *quotapool.IntPool) (testToRunRes, error) { var ttr testToRunRes alloc, err := qp.AcquireFunc(ctx, func(ctx context.Context, pi quotapool.PoolInfo) (uint64, error) { diff --git a/pkg/cmd/testfilter/main.go b/pkg/cmd/testfilter/main.go index bd3e979d4959..eb698c065b51 100644 --- a/pkg/cmd/testfilter/main.go +++ b/pkg/cmd/testfilter/main.go @@ -16,16 +16,21 @@ // are `--mode=(strip|omit|convert)`, where: // // strip: omit output for non-failing tests, pass everything else through. In -// particular, non-test output and tests that never terminate are passed through. +// +// particular, non-test output and tests that never terminate are passed through. +// // omit: print only failing tests. Note that test2json does not close scopes for -// tests that are running in parallel (in the same package) with a "foreground" -// test that panics, so it will pass through *only* the one foreground test. -// Note also that package scopes are omitted; test2json does not reliably close -// them on panic/Exit anyway. +// +// tests that are running in parallel (in the same package) with a "foreground" +// test that panics, so it will pass through *only* the one foreground test. +// Note also that package scopes are omitted; test2json does not reliably close +// them on panic/Exit anyway. +// // convert: -// no filtering is performed, but any test2json input is translated back into -// its pure Go test framework text representation. This is useful for output -// intended for human eyes. +// +// no filtering is performed, but any test2json input is translated back into +// its pure Go test framework text representation. This is useful for output +// intended for human eyes. // // [test2json]: https://golang.org/cmd/test2json/ package main diff --git a/pkg/cmd/uptodate/uptodate.go b/pkg/cmd/uptodate/uptodate.go index bb182746b324..985d0fb9902a 100644 --- a/pkg/cmd/uptodate/uptodate.go +++ b/pkg/cmd/uptodate/uptodate.go @@ -14,7 +14,7 @@ package main import ( "fmt" - "io/ioutil" + "io" "log" "os" @@ -40,7 +40,7 @@ func main() { os.Exit(1) } if !*debug { - log.SetOutput(ioutil.Discard) + log.SetOutput(io.Discard) } output, inputs := pflag.Arg(0), pflag.Args()[1:] diff --git a/pkg/col/coldata/bytes.go b/pkg/col/coldata/bytes.go index da4488a72d5a..2d8a4825b627 100644 --- a/pkg/col/coldata/bytes.go +++ b/pkg/col/coldata/bytes.go @@ -29,18 +29,18 @@ import ( // // If the value is inlined, then the layout of element is used as follows: // -// 24-byte header | 6-byte padding -// element: .............................. | length | true -// Bytes.buffer: N/A +// 24-byte header | 6-byte padding +// element: .............................. | length | true +// Bytes.buffer: N/A // // where 30 dots describe the inlinable space followed by a single byte for the // length followed by a boolean 'true' indicating an inlined value. // // If the value is non-inlined, then the layout of element is used as follows: // -// padding -// element: .offset. | ..len... | ..cap... | xxxxxx | x | false -// Bytes.buffer: xxxxxxxx | offset .... | xxxxxxxx +// padding +// element: .offset. | ..len... | ..cap... | xxxxxx | x | false +// Bytes.buffer: xxxxxxxx | offset .... | xxxxxxxx // // where first 24 bytes contain our custom "header" of a byte slice that is // backed by Bytes.buffer. 
The following 7 bytes (the padding and the @@ -87,6 +87,7 @@ const BytesMaxInlineLength = int(unsafe.Offsetof(element{}.inlinedLength)) // inlinedSlice returns 30 bytes of space within e that can be used for storing // a value inlined, as a slice. +// //gcassert:inline func (e *element) inlinedSlice() []byte { return (*(*[BytesMaxInlineLength]byte)(unsafe.Pointer(&e.header)))[:] @@ -424,6 +425,7 @@ func (b *Bytes) AppendVal(v []byte) { } // Len returns how many []byte values the receiver contains. +// //gcassert:inline func (b *Bytes) Len() int { return len(b.elements) @@ -455,6 +457,7 @@ func (b *Bytes) ProportionalSize(n int64) int64 { // ElemSize returns the size in bytes of the []byte elem at the given index. // Panics if passed an invalid element. +// //gcassert:inline func (b *Bytes) ElemSize(idx int) int64 { if b.elements[idx].inlined { @@ -473,7 +476,6 @@ func (b *Bytes) ElemSize(idx int) int64 { // - If abbr[i] == abbr[j], it is unknown if b.Get(i) is greater than, less // than, or equal to b.Get(j). A full comparison of all bytes in each is // required. -// func (b *Bytes) Abbreviated() []uint64 { r := make([]uint64, b.Len()) for i := range r { @@ -487,15 +489,14 @@ func (b *Bytes) Abbreviated() []uint64 { // uint64. If the slice has less than 8 bytes, the value returned is the same as // if the slice was filled to 8 bytes with zero value bytes. For example: // -// abbreviate([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) -// => 1 -// -// abbreviate([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00}) -// => 256 +// abbreviate([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) +// => 1 // -// abbreviate([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) -// => 256 +// abbreviate([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00}) +// => 256 // +// abbreviate([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) +// => 256 func abbreviate(bs []byte) uint64 { if len(bs) >= 8 { return binary.BigEndian.Uint64(bs) @@ -517,6 +518,7 @@ var zeroElements = make([]element, MaxBatchSize) // Namely, this allows us to remove all "holes" (unused space) in b.buffer which // can occur when an old non-inlined element is overwritten by a new element // that is either fully-inlined or non-inlined but larger. +// //gcassert:inline func (b *Bytes) Reset() { if b.isWindow { diff --git a/pkg/col/coldata/native_types.go b/pkg/col/coldata/native_types.go index 708ab44d3825..3e90e82e2989 100644 --- a/pkg/col/coldata/native_types.go +++ b/pkg/col/coldata/native_types.go @@ -43,61 +43,74 @@ type Durations []duration.Duration // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. +// //gcassert:inline func (c Bools) Get(idx int) bool { return c[idx] } // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. +// //gcassert:inline func (c Int16s) Get(idx int) int16 { return c[idx] } // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. +// //gcassert:inline func (c Int32s) Get(idx int) int32 { return c[idx] } // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. +// //gcassert:inline func (c Int64s) Get(idx int) int64 { return c[idx] } // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. 
+// //gcassert:inline func (c Float64s) Get(idx int) float64 { return c[idx] } // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. +// //gcassert:inline func (c Decimals) Get(idx int) apd.Decimal { return c[idx] } // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. +// //gcassert:inline func (c Times) Get(idx int) time.Time { return c[idx] } // Get returns the element at index idx of the vector. The element cannot be // used anymore once the vector is modified. +// //gcassert:inline func (c Durations) Get(idx int) duration.Duration { return c[idx] } // Set sets the element at index idx of the vector to val. +// //gcassert:inline func (c Bools) Set(idx int, val bool) { c[idx] = val } // Set sets the element at index idx of the vector to val. +// //gcassert:inline func (c Int16s) Set(idx int, val int16) { c[idx] = val } // Set sets the element at index idx of the vector to val. +// //gcassert:inline func (c Int32s) Set(idx int, val int32) { c[idx] = val } // Set sets the element at index idx of the vector to val. +// //gcassert:inline func (c Int64s) Set(idx int, val int64) { c[idx] = val } // Set sets the element at index idx of the vector to val. +// //gcassert:inline func (c Float64s) Set(idx int, val float64) { c[idx] = val } @@ -109,47 +122,58 @@ func (c Float64s) Set(idx int, val float64) { c[idx] = val } func (c Decimals) Set(idx int, val apd.Decimal) { c[idx].Set(&val) } // Set sets the element at index idx of the vector to val. +// //gcassert:inline func (c Times) Set(idx int, val time.Time) { c[idx] = val } // Set sets the element at index idx of the vector to val. +// //gcassert:inline func (c Durations) Set(idx int, val duration.Duration) { c[idx] = val } // Len returns the length of the vector. +// //gcassert:inline func (c Bools) Len() int { return len(c) } // Len returns the length of the vector. +// //gcassert:inline func (c Int16s) Len() int { return len(c) } // Len returns the length of the vector. +// //gcassert:inline func (c Int32s) Len() int { return len(c) } // Len returns the length of the vector. +// //gcassert:inline func (c Int64s) Len() int { return len(c) } // Len returns the length of the vector. +// //gcassert:inline func (c Float64s) Len() int { return len(c) } // Len returns the length of the vector. +// //gcassert:inline func (c Decimals) Len() int { return len(c) } // Len returns the length of the vector. +// //gcassert:inline func (c Times) Len() int { return len(c) } // Len returns the length of the vector. +// //gcassert:inline func (c Durations) Len() int { return len(c) } // CopySlice copies src[srcStartIdx:srcEndIdx] into c starting at position // destIdx. +// //gcassert:inline func (c Bools) CopySlice(src Bools, destIdx, srcStartIdx, srcEndIdx int) { copy(c[destIdx:], src[srcStartIdx:srcEndIdx]) @@ -157,6 +181,7 @@ func (c Bools) CopySlice(src Bools, destIdx, srcStartIdx, srcEndIdx int) { // CopySlice copies src[srcStartIdx:srcEndIdx] into c starting at position // destIdx. +// //gcassert:inline func (c Int16s) CopySlice(src Int16s, destIdx, srcStartIdx, srcEndIdx int) { copy(c[destIdx:], src[srcStartIdx:srcEndIdx]) @@ -164,6 +189,7 @@ func (c Int16s) CopySlice(src Int16s, destIdx, srcStartIdx, srcEndIdx int) { // CopySlice copies src[srcStartIdx:srcEndIdx] into c starting at position // destIdx. 
+// //gcassert:inline func (c Int32s) CopySlice(src Int32s, destIdx, srcStartIdx, srcEndIdx int) { copy(c[destIdx:], src[srcStartIdx:srcEndIdx]) @@ -171,6 +197,7 @@ func (c Int32s) CopySlice(src Int32s, destIdx, srcStartIdx, srcEndIdx int) { // CopySlice copies src[srcStartIdx:srcEndIdx] into c starting at position // destIdx. +// //gcassert:inline func (c Int64s) CopySlice(src Int64s, destIdx, srcStartIdx, srcEndIdx int) { copy(c[destIdx:], src[srcStartIdx:srcEndIdx]) @@ -178,6 +205,7 @@ func (c Int64s) CopySlice(src Int64s, destIdx, srcStartIdx, srcEndIdx int) { // CopySlice copies src[srcStartIdx:srcEndIdx] into c starting at position // destIdx. +// //gcassert:inline func (c Float64s) CopySlice(src Float64s, destIdx, srcStartIdx, srcEndIdx int) { copy(c[destIdx:], src[srcStartIdx:srcEndIdx]) @@ -198,6 +226,7 @@ func (c Decimals) CopySlice(src Decimals, destIdx, srcStartIdx, srcEndIdx int) { // CopySlice copies src[srcStartIdx:srcEndIdx] into c starting at position // destIdx. +// //gcassert:inline func (c Times) CopySlice(src Times, destIdx, srcStartIdx, srcEndIdx int) { copy(c[destIdx:], src[srcStartIdx:srcEndIdx]) @@ -205,39 +234,48 @@ func (c Times) CopySlice(src Times, destIdx, srcStartIdx, srcEndIdx int) { // CopySlice copies src[srcStartIdx:srcEndIdx] into c starting at position // destIdx. +// //gcassert:inline func (c Durations) CopySlice(src Durations, destIdx, srcStartIdx, srcEndIdx int) { copy(c[destIdx:], src[srcStartIdx:srcEndIdx]) } // Window returns the window into the vector. +// //gcassert:inline func (c Bools) Window(start, end int) Bools { return c[start:end] } // Window returns the window into the vector. +// //gcassert:inline func (c Int16s) Window(start, end int) Int16s { return c[start:end] } // Window returns the window into the vector. +// //gcassert:inline func (c Int32s) Window(start, end int) Int32s { return c[start:end] } // Window returns the window into the vector. +// //gcassert:inline func (c Int64s) Window(start, end int) Int64s { return c[start:end] } // Window returns the window into the vector. +// //gcassert:inline func (c Float64s) Window(start, end int) Float64s { return c[start:end] } // Window returns the window into the vector. +// //gcassert:inline func (c Decimals) Window(start, end int) Decimals { return c[start:end] } // Window returns the window into the vector. +// //gcassert:inline func (c Times) Window(start, end int) Times { return c[start:end] } // Window returns the window into the vector. +// //gcassert:inline func (c Durations) Window(start, end int) Durations { return c[start:end] } diff --git a/pkg/col/coldata/testutils.go b/pkg/col/coldata/testutils.go index fbfbe15cf4ee..24fbf1587bc1 100644 --- a/pkg/col/coldata/testutils.go +++ b/pkg/col/coldata/testutils.go @@ -22,8 +22,9 @@ import ( // testingT is a private interface that mirrors the testing.TB methods used. // testing.TB cannot be used directly since testing is an illegal import. // TODO(asubiotto): Remove AssertEquivalentBatches' dependency on testing.TB by -// checking for equality and returning a diff string instead of operating on -// testing.TB. +// +// checking for equality and returning a diff string instead of operating on +// testing.TB. 
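An aside on the native_types.go helpers whose comments are reformatted above (Get, Set, Len, CopySlice, Window): they are thin wrappers over Go slices. A usage sketch, assuming it is compiled inside the repository so the coldata import resolves:

    package main

    import (
        "fmt"

        "github.com/cockroachdb/cockroach/pkg/col/coldata"
    )

    func main() {
        src := coldata.Int64s{1, 2, 3, 4}
        dst := make(coldata.Int64s, 4)
        // Copy src[2:4] into dst starting at index 1: dst becomes {0, 3, 4, 0}.
        dst.CopySlice(src, 1, 2, 4)
        dst.Set(0, 42)
        // Window returns a sub-slice view; like Get, it is only valid until
        // the vector is modified.
        fmt.Println(dst.Get(0), dst.Len(), dst.Window(1, 3)) // 42 4 [3 4]
    }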
type testingT interface { Helper() Errorf(format string, args ...interface{}) diff --git a/pkg/col/colserde/arrowserde/file_generated.go b/pkg/col/colserde/arrowserde/file_generated.go index 4cb12084d5cb..b63851f31c65 100644 --- a/pkg/col/colserde/arrowserde/file_generated.go +++ b/pkg/col/colserde/arrowserde/file_generated.go @@ -4,9 +4,9 @@ package arrowserde import flatbuffers "github.com/google/flatbuffers/go" -/// ---------------------------------------------------------------------- -/// Arrow File metadata -/// +// / ---------------------------------------------------------------------- +// / Arrow File metadata +// / type Footer struct { _tab flatbuffers.Table } @@ -132,34 +132,34 @@ func (rcv *Block) Table() flatbuffers.Table { return rcv._tab.Table } -/// Index to the start of the RecordBlock (note this is past the Message header) +// / Index to the start of the RecordBlock (note this is past the Message header) func (rcv *Block) Offset() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) } -/// Index to the start of the RecordBlock (note this is past the Message header) +// / Index to the start of the RecordBlock (note this is past the Message header) func (rcv *Block) MutateOffset(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) } -/// Length of the metadata +// / Length of the metadata func (rcv *Block) MetaDataLength() int32 { return rcv._tab.GetInt32(rcv._tab.Pos + flatbuffers.UOffsetT(8)) } -/// Length of the metadata +// / Length of the metadata func (rcv *Block) MutateMetaDataLength(n int32) bool { return rcv._tab.MutateInt32(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) } -/// Length of the data (this is aligned so there can be a gap between this and -/// the metadata). +// / Length of the data (this is aligned so there can be a gap between this and +// / the metadata). func (rcv *Block) BodyLength() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(16)) } -/// Length of the data (this is aligned so there can be a gap between this and -/// the metadata). +// / Length of the data (this is aligned so there can be a gap between this and +// / the metadata). func (rcv *Block) MutateBodyLength(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(16), n) } diff --git a/pkg/col/colserde/arrowserde/message_generated.go b/pkg/col/colserde/arrowserde/message_generated.go index 75f3e729585e..f0cac2961502 100644 --- a/pkg/col/colserde/arrowserde/message_generated.go +++ b/pkg/col/colserde/arrowserde/message_generated.go @@ -4,14 +4,14 @@ package arrowserde import flatbuffers "github.com/google/flatbuffers/go" -/// ---------------------------------------------------------------------- -/// The root Message type -/// This union enables us to easily send different message types without -/// redundant storage, and in the future we can easily add new message types. -/// -/// Arrow implementations do not need to implement all of the message types, -/// which may include experimental metadata types. For maximum compatibility, -/// it is best to send data using RecordBatch +// / ---------------------------------------------------------------------- +// / The root Message type +// / This union enables us to easily send different message types without +// / redundant storage, and in the future we can easily add new message types. +// / +// / Arrow implementations do not need to implement all of the message types, +// / which may include experimental metadata types. 
For maximum compatibility, +// / it is best to send data using RecordBatch type MessageHeader = byte const ( @@ -32,15 +32,15 @@ var EnumNamesMessageHeader = map[MessageHeader]string{ MessageHeaderSparseTensor: "SparseTensor", } -/// ---------------------------------------------------------------------- -/// Data structures for describing a table row batch (a collection of -/// equal-length Arrow arrays) -/// Metadata about a field at some level of a nested type tree (but not -/// its children). -/// -/// For example, a List with values [[1, 2, 3], null, [4], [5, 6], null] -/// would have {length: 5, null_count: 2} for its List node, and {length: 6, -/// null_count: 0} for its Int16 node, as separate FieldNode structs +// / ---------------------------------------------------------------------- +// / Data structures for describing a table row batch (a collection of +// / equal-length Arrow arrays) +// / Metadata about a field at some level of a nested type tree (but not +// / its children). +// / +// / For example, a List with values [[1, 2, 3], null, [4], [5, 6], null] +// / would have {length: 5, null_count: 2} for its List node, and {length: 6, +// / null_count: 0} for its Int16 node, as separate FieldNode structs type FieldNode struct { _tab flatbuffers.Struct } @@ -54,28 +54,28 @@ func (rcv *FieldNode) Table() flatbuffers.Table { return rcv._tab.Table } -/// The number of value slots in the Arrow array at this level of a nested -/// tree +// / The number of value slots in the Arrow array at this level of a nested +// / tree func (rcv *FieldNode) Length() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) } -/// The number of value slots in the Arrow array at this level of a nested -/// tree +// / The number of value slots in the Arrow array at this level of a nested +// / tree func (rcv *FieldNode) MutateLength(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) } -/// The number of observed nulls. Fields with null_count == 0 may choose not -/// to write their physical validity bitmap out as a materialized buffer, -/// instead setting the length of the bitmap buffer to 0. +// / The number of observed nulls. Fields with null_count == 0 may choose not +// / to write their physical validity bitmap out as a materialized buffer, +// / instead setting the length of the bitmap buffer to 0. func (rcv *FieldNode) NullCount() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8)) } -/// The number of observed nulls. Fields with null_count == 0 may choose not -/// to write their physical validity bitmap out as a materialized buffer, -/// instead setting the length of the bitmap buffer to 0. +// / The number of observed nulls. Fields with null_count == 0 may choose not +// / to write their physical validity bitmap out as a materialized buffer, +// / instead setting the length of the bitmap buffer to 0. func (rcv *FieldNode) MutateNullCount(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) } @@ -89,9 +89,9 @@ func CreateFieldNode( return builder.Offset() } -/// A data header describing the shared memory layout of a "record" or "row" -/// batch. Some systems call this a "row batch" internally and others a "record -/// batch". +// / A data header describing the shared memory layout of a "record" or "row" +// / batch. Some systems call this a "row batch" internally and others a "record +// / batch". 
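Before the RecordBatch accessors below, a note on the churn in these generated files: the flatc-generated sources in pkg/col/colserde/arrowserde use C++-style /// doc comments, and Go 1.19's gofmt normalizes non-directive line comments to have a space after //, which is presumably why every /// line becomes // / here. A minimal illustration, not taken from the repository:

    package example

    // The generated source reads:
    //
    //	/// Length of the metadata
    //
    // and Go 1.19 gofmt rewrites it to the form below, inserting a space after
    // "//" because "///..." is not a recognized directive:

    // / Length of the metadata
    func MetaDataLength() int32 { return 0 }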
type RecordBatch struct { _tab flatbuffers.Table } @@ -112,8 +112,8 @@ func (rcv *RecordBatch) Table() flatbuffers.Table { return rcv._tab } -/// number of records / rows. The arrays in the batch should all have this -/// length +// / number of records / rows. The arrays in the batch should all have this +// / length func (rcv *RecordBatch) Length() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -122,13 +122,13 @@ func (rcv *RecordBatch) Length() int64 { return 0 } -/// number of records / rows. The arrays in the batch should all have this -/// length +// / number of records / rows. The arrays in the batch should all have this +// / length func (rcv *RecordBatch) MutateLength(n int64) bool { return rcv._tab.MutateInt64Slot(4, n) } -/// Nodes correspond to the pre-ordered flattened logical schema +// / Nodes correspond to the pre-ordered flattened logical schema func (rcv *RecordBatch) Nodes(obj *FieldNode, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -148,13 +148,13 @@ func (rcv *RecordBatch) NodesLength() int { return 0 } -/// Nodes correspond to the pre-ordered flattened logical schema -/// Buffers correspond to the pre-ordered flattened buffer tree -/// -/// The number of buffers appended to this list depends on the schema. For -/// example, most primitive arrays will have 2 buffers, 1 for the validity -/// bitmap and 1 for the values. For struct arrays, there will only be a -/// single buffer for the validity (nulls) bitmap +// / Nodes correspond to the pre-ordered flattened logical schema +// / Buffers correspond to the pre-ordered flattened buffer tree +// / +// / The number of buffers appended to this list depends on the schema. For +// / example, most primitive arrays will have 2 buffers, 1 for the validity +// / bitmap and 1 for the values. For struct arrays, there will only be a +// / single buffer for the validity (nulls) bitmap func (rcv *RecordBatch) Buffers(obj *Buffer, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -174,12 +174,12 @@ func (rcv *RecordBatch) BuffersLength() int { return 0 } -/// Buffers correspond to the pre-ordered flattened buffer tree -/// -/// The number of buffers appended to this list depends on the schema. For -/// example, most primitive arrays will have 2 buffers, 1 for the validity -/// bitmap and 1 for the values. For struct arrays, there will only be a -/// single buffer for the validity (nulls) bitmap +// / Buffers correspond to the pre-ordered flattened buffer tree +// / +// / The number of buffers appended to this list depends on the schema. For +// / example, most primitive arrays will have 2 buffers, 1 for the validity +// / bitmap and 1 for the values. For struct arrays, there will only be a +// / single buffer for the validity (nulls) bitmap func RecordBatchStart(builder *flatbuffers.Builder) { builder.StartObject(3) } @@ -204,12 +204,12 @@ func RecordBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// For sending dictionary encoding information. Any Field can be -/// dictionary-encoded, but in this case none of its children may be -/// dictionary-encoded. -/// There is one vector / column per dictionary, but that vector / column -/// may be spread across multiple dictionary batches by using the isDelta -/// flag +// / For sending dictionary encoding information. Any Field can be +// / dictionary-encoded, but in this case none of its children may be +// / dictionary-encoded. 
+// / There is one vector / column per dictionary, but that vector / column +// / may be spread across multiple dictionary batches by using the isDelta +// / flag type DictionaryBatch struct { _tab flatbuffers.Table } @@ -255,8 +255,8 @@ func (rcv *DictionaryBatch) Data(obj *RecordBatch) *RecordBatch { return nil } -/// If isDelta is true the values in the dictionary are to be appended to a -/// dictionary with the indicated id +// / If isDelta is true the values in the dictionary are to be appended to a +// / dictionary with the indicated id func (rcv *DictionaryBatch) IsDelta() byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -265,8 +265,8 @@ func (rcv *DictionaryBatch) IsDelta() byte { return 0 } -/// If isDelta is true the values in the dictionary are to be appended to a -/// dictionary with the indicated id +// / If isDelta is true the values in the dictionary are to be appended to a +// / dictionary with the indicated id func (rcv *DictionaryBatch) MutateIsDelta(n byte) bool { return rcv._tab.MutateByteSlot(8, n) } diff --git a/pkg/col/colserde/arrowserde/schema_generated.go b/pkg/col/colserde/arrowserde/schema_generated.go index 45d41631a256..9f494d0d925a 100644 --- a/pkg/col/colserde/arrowserde/schema_generated.go +++ b/pkg/col/colserde/arrowserde/schema_generated.go @@ -90,9 +90,9 @@ var EnumNamesIntervalUnit = map[IntervalUnit]string{ IntervalUnitDAY_TIME: "DAY_TIME", } -/// ---------------------------------------------------------------------- -/// Top-level Type value, enabling extensible type-specific metadata. We can -/// add new logical types to Type without breaking backwards compatibility +// / ---------------------------------------------------------------------- +// / Top-level Type value, enabling extensible type-specific metadata. We can +// / add new logical types to Type without breaking backwards compatibility type Type = byte const ( @@ -137,8 +137,8 @@ var EnumNamesType = map[Type]string{ TypeMap: "Map", } -/// ---------------------------------------------------------------------- -/// Endianness of the platform producing the data +// / ---------------------------------------------------------------------- +// / Endianness of the platform producing the data type Endianness = int16 const ( @@ -151,7 +151,7 @@ var EnumNamesEndianness = map[Endianness]string{ EndiannessBig: "Big", } -/// These are stored in the flatbuffer in the Type union below +// / These are stored in the flatbuffer in the Type union below type Null struct { _tab flatbuffers.Table } @@ -179,9 +179,9 @@ func NullEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// A Struct_ in the flatbuffer metadata is the same as an Arrow Struct -/// (according to the physical memory layout). We used Struct_ here as -/// Struct is a reserved word in Flatbuffers +// / A Struct_ in the flatbuffer metadata is the same as an Arrow Struct +// / (according to the physical memory layout). 
We used Struct_ here as +// / Struct is a reserved word in Flatbuffers type Struct_ struct { _tab flatbuffers.Table } @@ -256,7 +256,7 @@ func (rcv *FixedSizeList) Table() flatbuffers.Table { return rcv._tab } -/// Number of list items per value +// / Number of list items per value func (rcv *FixedSizeList) ListSize() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -265,7 +265,7 @@ func (rcv *FixedSizeList) ListSize() int32 { return 0 } -/// Number of list items per value +// / Number of list items per value func (rcv *FixedSizeList) MutateListSize(n int32) bool { return rcv._tab.MutateInt32Slot(4, n) } @@ -280,30 +280,30 @@ func FixedSizeListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// A Map is a logical nested type that is represented as -/// -/// List> -/// -/// In this layout, the keys and values are each respectively contiguous. We do -/// not constrain the key and value types, so the application is responsible -/// for ensuring that the keys are hashable and unique. Whether the keys are sorted -/// may be set in the metadata for this field -/// -/// In a Field with Map type, the Field has a child Struct field, which then -/// has two children: key type and the second the value type. The names of the -/// child fields may be respectively "entry", "key", and "value", but this is -/// not enforced -/// -/// Map -/// - child[0] entry: Struct -/// - child[0] key: K -/// - child[1] value: V -/// -/// Neither the "entry" field nor the "key" field may be nullable. -/// -/// The metadata is structured so that Arrow systems without special handling -/// for Map can make Map an alias for List. The "layout" attribute for the Map -/// field must have the same contents as a List. +// / A Map is a logical nested type that is represented as +// / +// / List> +// / +// / In this layout, the keys and values are each respectively contiguous. We do +// / not constrain the key and value types, so the application is responsible +// / for ensuring that the keys are hashable and unique. Whether the keys are sorted +// / may be set in the metadata for this field +// / +// / In a Field with Map type, the Field has a child Struct field, which then +// / has two children: key type and the second the value type. The names of the +// / child fields may be respectively "entry", "key", and "value", but this is +// / not enforced +// / +// / Map +// / - child[0] entry: Struct +// / - child[0] key: K +// / - child[1] value: V +// / +// / Neither the "entry" field nor the "key" field may be nullable. +// / +// / The metadata is structured so that Arrow systems without special handling +// / for Map can make Map an alias for List. The "layout" attribute for the Map +// / field must have the same contents as a List. 
type Map struct { _tab flatbuffers.Table } @@ -324,7 +324,7 @@ func (rcv *Map) Table() flatbuffers.Table { return rcv._tab } -/// Set to true if the keys within each value are sorted +// / Set to true if the keys within each value are sorted func (rcv *Map) KeysSorted() byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -333,7 +333,7 @@ func (rcv *Map) KeysSorted() byte { return 0 } -/// Set to true if the keys within each value are sorted +// / Set to true if the keys within each value are sorted func (rcv *Map) MutateKeysSorted(n byte) bool { return rcv._tab.MutateByteSlot(4, n) } @@ -348,10 +348,10 @@ func MapEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// A union is a complex type with children in Field -/// By default ids in the type vector refer to the offsets in the children -/// optionally typeIds provides an indirection between the child offset and the type id -/// for each child typeIds[offset] is the id used in the type vector +// / A union is a complex type with children in Field +// / By default ids in the type vector refer to the offsets in the children +// / optionally typeIds provides an indirection between the child offset and the type id +// / for each child typeIds[offset] is the id used in the type vector type Union struct { _tab flatbuffers.Table } @@ -516,7 +516,7 @@ func FloatingPointEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// Unicode with UTF-8 encoding +// / Unicode with UTF-8 encoding type Utf8 struct { _tab flatbuffers.Table } @@ -591,7 +591,7 @@ func (rcv *FixedSizeBinary) Table() flatbuffers.Table { return rcv._tab } -/// Number of bytes per value +// / Number of bytes per value func (rcv *FixedSizeBinary) ByteWidth() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -600,7 +600,7 @@ func (rcv *FixedSizeBinary) ByteWidth() int32 { return 0 } -/// Number of bytes per value +// / Number of bytes per value func (rcv *FixedSizeBinary) MutateByteWidth(n int32) bool { return rcv._tab.MutateInt32Slot(4, n) } @@ -662,7 +662,7 @@ func (rcv *Decimal) Table() flatbuffers.Table { return rcv._tab } -/// Total number of decimal digits +// / Total number of decimal digits func (rcv *Decimal) Precision() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -671,12 +671,12 @@ func (rcv *Decimal) Precision() int32 { return 0 } -/// Total number of decimal digits +// / Total number of decimal digits func (rcv *Decimal) MutatePrecision(n int32) bool { return rcv._tab.MutateInt32Slot(4, n) } -/// Number of digits after the decimal point "." +// / Number of digits after the decimal point "." func (rcv *Decimal) Scale() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -685,7 +685,7 @@ func (rcv *Decimal) Scale() int32 { return 0 } -/// Number of digits after the decimal point "." +// / Number of digits after the decimal point "." 
+// / Number of digits after the decimal point "."
func (rcv *Decimal) MutateScale(n int32) bool { return rcv._tab.MutateInt32Slot(6, n) } @@ -703,12 +703,12 @@ func DecimalEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// Date is either a 32-bit or 64-bit type representing elapsed time since UNIX -/// epoch (1970-01-01), stored in either of two units: -/// -/// * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no -/// leap seconds), where the values are evenly divisible by 86400000 -/// * Days (32 bits) since the UNIX epoch +// / Date is either a 32-bit or 64-bit type representing elapsed time since UNIX +// / epoch (1970-01-01), stored in either of two units: +// / +// / * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no +// / leap seconds), where the values are evenly divisible by 86400000 +// / * Days (32 bits) since the UNIX epoch type Date struct { _tab flatbuffers.Table } @@ -751,9 +751,9 @@ func DateEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// Time type. The physical storage type depends on the unit -/// - SECOND and MILLISECOND: 32 bits -/// - MICROSECOND and NANOSECOND: 64 bits +// / Time type. The physical storage type depends on the unit +// / - SECOND and MILLISECOND: 32 bits +// / - MICROSECOND and NANOSECOND: 64 bits type Time struct { _tab flatbuffers.Table } @@ -811,12 +811,12 @@ func TimeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// Time elapsed from the Unix epoch, 00:00:00.000 on 1 January 1970, excluding -/// leap seconds, as a 64-bit integer. Note that UNIX time does not include -/// leap seconds. -/// -/// The Timestamp metadata supports both "time zone naive" and "time zone -/// aware" timestamps. Read about the timezone attribute for more detail +// / Time elapsed from the Unix epoch, 00:00:00.000 on 1 January 1970, excluding +// / leap seconds, as a 64-bit integer. Note that UNIX time does not include +// / leap seconds. +// / +// / The Timestamp metadata supports both "time zone naive" and "time zone +// / aware" timestamps. Read about the timezone attribute for more detail type Timestamp struct { _tab flatbuffers.Table } @@ -849,26 +849,26 @@ func (rcv *Timestamp) MutateUnit(n int16) bool { return rcv._tab.MutateInt16Slot(4, n) } -/// The time zone is a string indicating the name of a time zone, one of: -/// -/// * As used in the Olson time zone database (the "tz database" or -/// "tzdata"), such as "America/New_York" -/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 -/// -/// Whether a timezone string is present indicates different semantics about -/// the data: -/// -/// * If the time zone is null or equal to an empty string, the data is "time -/// zone naive" and shall be displayed *as is* to the user, not localized -/// to the locale of the user. This data can be though of as UTC but -/// without having "UTC" as the time zone, it is not considered to be -/// localized to any time zone -/// -/// * If the time zone is set to a valid value, values can be displayed as -/// "localized" to that time zone, even though the underlying 64-bit -/// integers are identical to the same data stored in UTC. 
Converting -/// between time zones is a metadata-only operation and does not change the -/// underlying values +// / The time zone is a string indicating the name of a time zone, one of: +// / +// / * As used in the Olson time zone database (the "tz database" or +// / "tzdata"), such as "America/New_York" +// / * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 +// / +// / Whether a timezone string is present indicates different semantics about +// / the data: +// / +// / * If the time zone is null or equal to an empty string, the data is "time +// / zone naive" and shall be displayed *as is* to the user, not localized +// / to the locale of the user. This data can be though of as UTC but +// / without having "UTC" as the time zone, it is not considered to be +// / localized to any time zone +// / +// / * If the time zone is set to a valid value, values can be displayed as +// / "localized" to that time zone, even though the underlying 64-bit +// / integers are identical to the same data stored in UTC. Converting +// / between time zones is a metadata-only operation and does not change the +// / underlying values func (rcv *Timestamp) Timezone() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -877,26 +877,26 @@ func (rcv *Timestamp) Timezone() []byte { return nil } -/// The time zone is a string indicating the name of a time zone, one of: -/// -/// * As used in the Olson time zone database (the "tz database" or -/// "tzdata"), such as "America/New_York" -/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 -/// -/// Whether a timezone string is present indicates different semantics about -/// the data: -/// -/// * If the time zone is null or equal to an empty string, the data is "time -/// zone naive" and shall be displayed *as is* to the user, not localized -/// to the locale of the user. This data can be though of as UTC but -/// without having "UTC" as the time zone, it is not considered to be -/// localized to any time zone -/// -/// * If the time zone is set to a valid value, values can be displayed as -/// "localized" to that time zone, even though the underlying 64-bit -/// integers are identical to the same data stored in UTC. Converting -/// between time zones is a metadata-only operation and does not change the -/// underlying values +// / The time zone is a string indicating the name of a time zone, one of: +// / +// / * As used in the Olson time zone database (the "tz database" or +// / "tzdata"), such as "America/New_York" +// / * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 +// / +// / Whether a timezone string is present indicates different semantics about +// / the data: +// / +// / * If the time zone is null or equal to an empty string, the data is "time +// / zone naive" and shall be displayed *as is* to the user, not localized +// / to the locale of the user. This data can be though of as UTC but +// / without having "UTC" as the time zone, it is not considered to be +// / localized to any time zone +// / +// / * If the time zone is set to a valid value, values can be displayed as +// / "localized" to that time zone, even though the underlying 64-bit +// / integers are identical to the same data stored in UTC. 
Converting +// / between time zones is a metadata-only operation and does not change the +// / underlying values func TimestampStart(builder *flatbuffers.Builder) { builder.StartObject(2) } @@ -952,9 +952,9 @@ func IntervalEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// ---------------------------------------------------------------------- -/// user defined key value pairs to add custom metadata to arrow -/// key namespacing is the responsibility of the user +// / ---------------------------------------------------------------------- +// / user defined key value pairs to add custom metadata to arrow +// / key namespacing is the responsibility of the user type KeyValue struct { _tab flatbuffers.Table } @@ -1004,8 +1004,8 @@ func KeyValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// ---------------------------------------------------------------------- -/// Dictionary encoding metadata +// / ---------------------------------------------------------------------- +// / Dictionary encoding metadata type DictionaryEncoding struct { _tab flatbuffers.Table } @@ -1026,9 +1026,9 @@ func (rcv *DictionaryEncoding) Table() flatbuffers.Table { return rcv._tab } -/// The known dictionary id in the application where this data is used. In -/// the file or streaming formats, the dictionary ids are found in the -/// DictionaryBatch messages +// / The known dictionary id in the application where this data is used. In +// / the file or streaming formats, the dictionary ids are found in the +// / DictionaryBatch messages func (rcv *DictionaryEncoding) Id() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -1037,15 +1037,15 @@ func (rcv *DictionaryEncoding) Id() int64 { return 0 } -/// The known dictionary id in the application where this data is used. In -/// the file or streaming formats, the dictionary ids are found in the -/// DictionaryBatch messages +// / The known dictionary id in the application where this data is used. In +// / the file or streaming formats, the dictionary ids are found in the +// / DictionaryBatch messages func (rcv *DictionaryEncoding) MutateId(n int64) bool { return rcv._tab.MutateInt64Slot(4, n) } -/// The dictionary indices are constrained to be positive integers. If this -/// field is null, the indices must be signed int32 +// / The dictionary indices are constrained to be positive integers. If this +// / field is null, the indices must be signed int32 func (rcv *DictionaryEncoding) IndexType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -1059,12 +1059,12 @@ func (rcv *DictionaryEncoding) IndexType(obj *Int) *Int { return nil } -/// The dictionary indices are constrained to be positive integers. If this -/// field is null, the indices must be signed int32 -/// By default, dictionaries are not ordered, or the order does not have -/// semantic meaning. In some statistical, applications, dictionary-encoding -/// is used to represent ordered categorical data, and we provide a way to -/// preserve that metadata here +// / The dictionary indices are constrained to be positive integers. If this +// / field is null, the indices must be signed int32 +// / By default, dictionaries are not ordered, or the order does not have +// / semantic meaning. 
In some statistical, applications, dictionary-encoding +// / is used to represent ordered categorical data, and we provide a way to +// / preserve that metadata here func (rcv *DictionaryEncoding) IsOrdered() byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -1073,10 +1073,10 @@ func (rcv *DictionaryEncoding) IsOrdered() byte { return 0 } -/// By default, dictionaries are not ordered, or the order does not have -/// semantic meaning. In some statistical, applications, dictionary-encoding -/// is used to represent ordered categorical data, and we provide a way to -/// preserve that metadata here +// / By default, dictionaries are not ordered, or the order does not have +// / semantic meaning. In some statistical, applications, dictionary-encoding +// / is used to represent ordered categorical data, and we provide a way to +// / preserve that metadata here func (rcv *DictionaryEncoding) MutateIsOrdered(n byte) bool { return rcv._tab.MutateByteSlot(8, n) } @@ -1097,13 +1097,13 @@ func DictionaryEncodingEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// ---------------------------------------------------------------------- -/// A field represents a named column in a record / row batch or child of a -/// nested type. -/// -/// - children is only for nested Arrow arrays -/// - For primitive types, children will have length 0 -/// - nullable should default to true in general +// / ---------------------------------------------------------------------- +// / A field represents a named column in a record / row batch or child of a +// / nested type. +// / +// / - children is only for nested Arrow arrays +// / - For primitive types, children will have length 0 +// / - nullable should default to true in general type Field struct { _tab flatbuffers.Table } @@ -1254,8 +1254,8 @@ func FieldEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// ---------------------------------------------------------------------- -/// A Buffer represents a single contiguous memory segment +// / ---------------------------------------------------------------------- +// / A Buffer represents a single contiguous memory segment type Buffer struct { _tab flatbuffers.Struct } @@ -1269,26 +1269,26 @@ func (rcv *Buffer) Table() flatbuffers.Table { return rcv._tab.Table } -/// The relative offset into the shared memory page where the bytes for this -/// buffer starts +// / The relative offset into the shared memory page where the bytes for this +// / buffer starts func (rcv *Buffer) Offset() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) } -/// The relative offset into the shared memory page where the bytes for this -/// buffer starts +// / The relative offset into the shared memory page where the bytes for this +// / buffer starts func (rcv *Buffer) MutateOffset(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) } -/// The absolute length (in bytes) of the memory buffer. The memory is found -/// from offset (inclusive) to offset + length (non-inclusive). +// / The absolute length (in bytes) of the memory buffer. The memory is found +// / from offset (inclusive) to offset + length (non-inclusive). func (rcv *Buffer) Length() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8)) } -/// The absolute length (in bytes) of the memory buffer. The memory is found -/// from offset (inclusive) to offset + length (non-inclusive). 
+// / The absolute length (in bytes) of the memory buffer. The memory is found +// / from offset (inclusive) to offset + length (non-inclusive). func (rcv *Buffer) MutateLength(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) } @@ -1300,8 +1300,8 @@ func CreateBuffer(builder *flatbuffers.Builder, offset int64, length int64) flat return builder.Offset() } -/// ---------------------------------------------------------------------- -/// A Schema describes the columns in a row batch +// / ---------------------------------------------------------------------- +// / A Schema describes the columns in a row batch type Schema struct { _tab flatbuffers.Table } @@ -1322,9 +1322,9 @@ func (rcv *Schema) Table() flatbuffers.Table { return rcv._tab } -/// endianness of the buffer -/// it is Little Endian by default -/// if endianness doesn't match the underlying system then the vectors need to be converted +// / endianness of the buffer +// / it is Little Endian by default +// / if endianness doesn't match the underlying system then the vectors need to be converted func (rcv *Schema) Endianness() int16 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -1333,9 +1333,9 @@ func (rcv *Schema) Endianness() int16 { return 0 } -/// endianness of the buffer -/// it is Little Endian by default -/// if endianness doesn't match the underlying system then the vectors need to be converted +// / endianness of the buffer +// / it is Little Endian by default +// / if endianness doesn't match the underlying system then the vectors need to be converted func (rcv *Schema) MutateEndianness(n int16) bool { return rcv._tab.MutateInt16Slot(4, n) } diff --git a/pkg/col/colserde/arrowserde/tensor_generated.go b/pkg/col/colserde/arrowserde/tensor_generated.go index 0d30ab7dd410..432da3eafa0b 100644 --- a/pkg/col/colserde/arrowserde/tensor_generated.go +++ b/pkg/col/colserde/arrowserde/tensor_generated.go @@ -18,9 +18,9 @@ var EnumNamesSparseTensorIndex = map[SparseTensorIndex]string{ SparseTensorIndexSparseMatrixIndexCSR: "SparseMatrixIndexCSR", } -/// ---------------------------------------------------------------------- -/// Data structures for dense tensors -/// Shape data for a single axis in a tensor +// / ---------------------------------------------------------------------- +// / Data structures for dense tensors +// / Shape data for a single axis in a tensor type TensorDim struct { _tab flatbuffers.Table } @@ -41,7 +41,7 @@ func (rcv *TensorDim) Table() flatbuffers.Table { return rcv._tab } -/// Length of dimension +// / Length of dimension func (rcv *TensorDim) Size() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -50,12 +50,12 @@ func (rcv *TensorDim) Size() int64 { return 0 } -/// Length of dimension +// / Length of dimension func (rcv *TensorDim) MutateSize(n int64) bool { return rcv._tab.MutateInt64Slot(4, n) } -/// Name of the dimension, optional +// / Name of the dimension, optional func (rcv *TensorDim) Name() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -64,7 +64,7 @@ func (rcv *TensorDim) Name() []byte { return nil } -/// Name of the dimension, optional +// / Name of the dimension, optional func TensorDimStart(builder *flatbuffers.Builder) { builder.StartObject(2) } @@ -110,8 +110,8 @@ func (rcv *Tensor) MutateTypeType(n byte) bool { return rcv._tab.MutateByteSlot(4, n) } -/// The type of data contained in a value cell. 
Currently only fixed-width -/// value types are supported, no strings or nested types +// / The type of data contained in a value cell. Currently only fixed-width +// / value types are supported, no strings or nested types func (rcv *Tensor) Type(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -121,9 +121,9 @@ func (rcv *Tensor) Type(obj *flatbuffers.Table) bool { return false } -/// The type of data contained in a value cell. Currently only fixed-width -/// value types are supported, no strings or nested types -/// The dimensions of the tensor, optionally named +// / The type of data contained in a value cell. Currently only fixed-width +// / value types are supported, no strings or nested types +// / The dimensions of the tensor, optionally named func (rcv *Tensor) Shape(obj *TensorDim, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -144,8 +144,8 @@ func (rcv *Tensor) ShapeLength() int { return 0 } -/// The dimensions of the tensor, optionally named -/// Non-negative byte offsets to advance one value cell along each dimension +// / The dimensions of the tensor, optionally named +// / Non-negative byte offsets to advance one value cell along each dimension func (rcv *Tensor) Strides(j int) int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -163,8 +163,8 @@ func (rcv *Tensor) StridesLength() int { return 0 } -/// Non-negative byte offsets to advance one value cell along each dimension -/// The location and size of the tensor's data +// / Non-negative byte offsets to advance one value cell along each dimension +// / The location and size of the tensor's data func (rcv *Tensor) Data(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { @@ -178,7 +178,7 @@ func (rcv *Tensor) Data(obj *Buffer) *Buffer { return nil } -/// The location and size of the tensor's data +// / The location and size of the tensor's data func TensorStart(builder *flatbuffers.Builder) { builder.StartObject(5) } @@ -207,9 +207,9 @@ func TensorEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } -/// ---------------------------------------------------------------------- -/// EXPERIMENTAL: Data structures for sparse tensors -/// Coordinate format of sparse tensor index. +// / ---------------------------------------------------------------------- +// / EXPERIMENTAL: Data structures for sparse tensors +// / Coordinate format of sparse tensor index. type SparseTensorIndexCOO struct { _tab flatbuffers.Table } @@ -230,29 +230,29 @@ func (rcv *SparseTensorIndexCOO) Table() flatbuffers.Table { return rcv._tab } -/// COO's index list are represented as a NxM matrix, -/// where N is the number of non-zero values, -/// and M is the number of dimensions of a sparse tensor. -/// indicesBuffer stores the location and size of this index matrix. -/// The type of index value is long, so the stride for the index matrix is unnecessary. -/// -/// For example, let X be a 2x3x4x5 tensor, and it has the following 6 non-zero values: -/// -/// X[0, 1, 2, 0] := 1 -/// X[1, 1, 2, 3] := 2 -/// X[0, 2, 1, 0] := 3 -/// X[0, 1, 3, 0] := 4 -/// X[0, 1, 2, 1] := 5 -/// X[1, 2, 0, 4] := 6 -/// -/// In COO format, the index matrix of X is the following 4x6 matrix: -/// -/// [[0, 0, 0, 0, 1, 1], -/// [1, 1, 1, 2, 1, 2], -/// [2, 2, 3, 1, 2, 0], -/// [0, 1, 0, 0, 3, 4]] -/// -/// Note that the indices are sorted in lexicographical order. 
+// / COO's index list are represented as a NxM matrix, +// / where N is the number of non-zero values, +// / and M is the number of dimensions of a sparse tensor. +// / indicesBuffer stores the location and size of this index matrix. +// / The type of index value is long, so the stride for the index matrix is unnecessary. +// / +// / For example, let X be a 2x3x4x5 tensor, and it has the following 6 non-zero values: +// / +// / X[0, 1, 2, 0] := 1 +// / X[1, 1, 2, 3] := 2 +// / X[0, 2, 1, 0] := 3 +// / X[0, 1, 3, 0] := 4 +// / X[0, 1, 2, 1] := 5 +// / X[1, 2, 0, 4] := 6 +// / +// / In COO format, the index matrix of X is the following 4x6 matrix: +// / +// / [[0, 0, 0, 0, 1, 1], +// / [1, 1, 1, 2, 1, 2], +// / [2, 2, 3, 1, 2, 0], +// / [0, 1, 0, 0, 3, 4]] +// / +// / Note that the indices are sorted in lexicographical order. func (rcv *SparseTensorIndexCOO) IndicesBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -266,29 +266,29 @@ func (rcv *SparseTensorIndexCOO) IndicesBuffer(obj *Buffer) *Buffer { return nil } -/// COO's index list are represented as a NxM matrix, -/// where N is the number of non-zero values, -/// and M is the number of dimensions of a sparse tensor. -/// indicesBuffer stores the location and size of this index matrix. -/// The type of index value is long, so the stride for the index matrix is unnecessary. -/// -/// For example, let X be a 2x3x4x5 tensor, and it has the following 6 non-zero values: -/// -/// X[0, 1, 2, 0] := 1 -/// X[1, 1, 2, 3] := 2 -/// X[0, 2, 1, 0] := 3 -/// X[0, 1, 3, 0] := 4 -/// X[0, 1, 2, 1] := 5 -/// X[1, 2, 0, 4] := 6 -/// -/// In COO format, the index matrix of X is the following 4x6 matrix: -/// -/// [[0, 0, 0, 0, 1, 1], -/// [1, 1, 1, 2, 1, 2], -/// [2, 2, 3, 1, 2, 0], -/// [0, 1, 0, 0, 3, 4]] -/// -/// Note that the indices are sorted in lexicographical order. +// / COO's index list are represented as a NxM matrix, +// / where N is the number of non-zero values, +// / and M is the number of dimensions of a sparse tensor. +// / indicesBuffer stores the location and size of this index matrix. +// / The type of index value is long, so the stride for the index matrix is unnecessary. +// / +// / For example, let X be a 2x3x4x5 tensor, and it has the following 6 non-zero values: +// / +// / X[0, 1, 2, 0] := 1 +// / X[1, 1, 2, 3] := 2 +// / X[0, 2, 1, 0] := 3 +// / X[0, 1, 3, 0] := 4 +// / X[0, 1, 2, 1] := 5 +// / X[1, 2, 0, 4] := 6 +// / +// / In COO format, the index matrix of X is the following 4x6 matrix: +// / +// / [[0, 0, 0, 0, 1, 1], +// / [1, 1, 1, 2, 1, 2], +// / [2, 2, 3, 1, 2, 0], +// / [0, 1, 0, 0, 3, 4]] +// / +// / Note that the indices are sorted in lexicographical order. func SparseTensorIndexCOOStart(builder *flatbuffers.Builder) { builder.StartObject(1) } @@ -301,7 +301,7 @@ func SparseTensorIndexCOOEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT return builder.EndObject() } -/// Compressed Sparse Row format, that is matrix-specific. +// / Compressed Sparse Row format, that is matrix-specific. type SparseMatrixIndexCSR struct { _tab flatbuffers.Table } @@ -322,28 +322,28 @@ func (rcv *SparseMatrixIndexCSR) Table() flatbuffers.Table { return rcv._tab } -/// indptrBuffer stores the location and size of indptr array that -/// represents the range of the rows. -/// The i-th row spans from indptr[i] to indptr[i+1] in the data. -/// The length of this array is 1 + (the number of rows), and the type -/// of index value is long. 
-/// -/// For example, let X be the following 6x4 matrix: -/// -/// X := [[0, 1, 2, 0], -/// [0, 0, 3, 0], -/// [0, 4, 0, 5], -/// [0, 0, 0, 0], -/// [6, 0, 7, 8], -/// [0, 9, 0, 0]]. -/// -/// The array of non-zero values in X is: -/// -/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. -/// -/// And the indptr of X is: -/// -/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +// / indptrBuffer stores the location and size of indptr array that +// / represents the range of the rows. +// / The i-th row spans from indptr[i] to indptr[i+1] in the data. +// / The length of this array is 1 + (the number of rows), and the type +// / of index value is long. +// / +// / For example, let X be the following 6x4 matrix: +// / +// / X := [[0, 1, 2, 0], +// / [0, 0, 3, 0], +// / [0, 4, 0, 5], +// / [0, 0, 0, 0], +// / [6, 0, 7, 8], +// / [0, 9, 0, 0]]. +// / +// / The array of non-zero values in X is: +// / +// / values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +// / +// / And the indptr of X is: +// / +// / indptr(X) = [0, 2, 3, 5, 5, 8, 10]. func (rcv *SparseMatrixIndexCSR) IndptrBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -357,35 +357,35 @@ func (rcv *SparseMatrixIndexCSR) IndptrBuffer(obj *Buffer) *Buffer { return nil } -/// indptrBuffer stores the location and size of indptr array that -/// represents the range of the rows. -/// The i-th row spans from indptr[i] to indptr[i+1] in the data. -/// The length of this array is 1 + (the number of rows), and the type -/// of index value is long. -/// -/// For example, let X be the following 6x4 matrix: -/// -/// X := [[0, 1, 2, 0], -/// [0, 0, 3, 0], -/// [0, 4, 0, 5], -/// [0, 0, 0, 0], -/// [6, 0, 7, 8], -/// [0, 9, 0, 0]]. -/// -/// The array of non-zero values in X is: -/// -/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. -/// -/// And the indptr of X is: -/// -/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. -/// indicesBuffer stores the location and size of the array that -/// contains the column indices of the corresponding non-zero values. -/// The type of index value is long. -/// -/// For example, the indices of the above X is: -/// -/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +// / indptrBuffer stores the location and size of indptr array that +// / represents the range of the rows. +// / The i-th row spans from indptr[i] to indptr[i+1] in the data. +// / The length of this array is 1 + (the number of rows), and the type +// / of index value is long. +// / +// / For example, let X be the following 6x4 matrix: +// / +// / X := [[0, 1, 2, 0], +// / [0, 0, 3, 0], +// / [0, 4, 0, 5], +// / [0, 0, 0, 0], +// / [6, 0, 7, 8], +// / [0, 9, 0, 0]]. +// / +// / The array of non-zero values in X is: +// / +// / values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +// / +// / And the indptr of X is: +// / +// / indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +// / indicesBuffer stores the location and size of the array that +// / contains the column indices of the corresponding non-zero values. +// / The type of index value is long. +// / +// / For example, the indices of the above X is: +// / +// / indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. func (rcv *SparseMatrixIndexCSR) IndicesBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -399,13 +399,13 @@ func (rcv *SparseMatrixIndexCSR) IndicesBuffer(obj *Buffer) *Buffer { return nil } -/// indicesBuffer stores the location and size of the array that -/// contains the column indices of the corresponding non-zero values. -/// The type of index value is long. 
-/// -/// For example, the indices of the above X is: -/// -/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +// / indicesBuffer stores the location and size of the array that +// / contains the column indices of the corresponding non-zero values. +// / The type of index value is long. +// / +// / For example, the indices of the above X is: +// / +// / indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. func SparseMatrixIndexCSRStart(builder *flatbuffers.Builder) { builder.StartObject(2) } @@ -455,9 +455,9 @@ func (rcv *SparseTensor) MutateTypeType(n byte) bool { return rcv._tab.MutateByteSlot(4, n) } -/// The type of data contained in a value cell. -/// Currently only fixed-width value types are supported, -/// no strings or nested types. +// / The type of data contained in a value cell. +// / Currently only fixed-width value types are supported, +// / no strings or nested types. func (rcv *SparseTensor) Type(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -467,10 +467,10 @@ func (rcv *SparseTensor) Type(obj *flatbuffers.Table) bool { return false } -/// The type of data contained in a value cell. -/// Currently only fixed-width value types are supported, -/// no strings or nested types. -/// The dimensions of the tensor, optionally named. +// / The type of data contained in a value cell. +// / Currently only fixed-width value types are supported, +// / no strings or nested types. +// / The dimensions of the tensor, optionally named. func (rcv *SparseTensor) Shape(obj *TensorDim, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -491,8 +491,8 @@ func (rcv *SparseTensor) ShapeLength() int { return 0 } -/// The dimensions of the tensor, optionally named. -/// The number of non-zero values in a sparse tensor. +// / The dimensions of the tensor, optionally named. +// / The number of non-zero values in a sparse tensor. func (rcv *SparseTensor) NonZeroLength() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -501,7 +501,7 @@ func (rcv *SparseTensor) NonZeroLength() int64 { return 0 } -/// The number of non-zero values in a sparse tensor. +// / The number of non-zero values in a sparse tensor. 
func (rcv *SparseTensor) MutateNonZeroLength(n int64) bool { return rcv._tab.MutateInt64Slot(10, n) } @@ -518,7 +518,7 @@ func (rcv *SparseTensor) MutateSparseIndexType(n byte) bool { return rcv._tab.MutateByteSlot(12, n) } -/// Sparse tensor index +// / Sparse tensor index func (rcv *SparseTensor) SparseIndex(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { @@ -528,8 +528,8 @@ func (rcv *SparseTensor) SparseIndex(obj *flatbuffers.Table) bool { return false } -/// Sparse tensor index -/// The location and size of the tensor's data +// / Sparse tensor index +// / The location and size of the tensor's data func (rcv *SparseTensor) Data(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { @@ -543,7 +543,7 @@ func (rcv *SparseTensor) Data(obj *Buffer) *Buffer { return nil } -/// The location and size of the tensor's data +// / The location and size of the tensor's data func SparseTensorStart(builder *flatbuffers.Builder) { builder.StartObject(7) } diff --git a/pkg/compose/BUILD.bazel b/pkg/compose/BUILD.bazel index 6c855fbb105f..c8aa0a79eb39 100644 --- a/pkg/compose/BUILD.bazel +++ b/pkg/compose/BUILD.bazel @@ -11,6 +11,7 @@ go_library( go_test( name = "compose_test", srcs = ["compose_test.go"], + args = ["-test.timeout=295s"], data = [ "//c-deps:libgeos", "//pkg/compose:compare/docker-compose.yml", diff --git a/pkg/compose/compare/compare/BUILD.bazel b/pkg/compose/compare/compare/BUILD.bazel index 3d5ca362ec30..c26cf35aea18 100644 --- a/pkg/compose/compare/compare/BUILD.bazel +++ b/pkg/compose/compare/compare/BUILD.bazel @@ -11,6 +11,7 @@ go_library( go_test( name = "compare_test", srcs = ["compare_test.go"], + args = ["-test.timeout=295s"], embed = [":compare"], gotags = ["compose"], tags = ["integration"], diff --git a/pkg/config/zonepb/zone_yaml.go b/pkg/config/zonepb/zone_yaml.go index 1aba07ec0f9c..236b09beaa5d 100644 --- a/pkg/config/zonepb/zone_yaml.go +++ b/pkg/config/zonepb/zone_yaml.go @@ -78,11 +78,11 @@ var _ yaml.Unmarshaler = &ConstraintsList{} // // We use two different formats here, dependent on whether per-replica // constraints are being used in ConstraintsList: -// 1. A legacy format when there are 0 or 1 Constraints and NumReplicas is -// zero: -// [c1, c2, c3] -// 2. A per-replica format when NumReplicas is non-zero: -// {"c1,c2,c3": numReplicas1, "c4,c5": numReplicas2} +// 1. A legacy format when there are 0 or 1 Constraints and NumReplicas is +// zero: +// [c1, c2, c3] +// 2. A per-replica format when NumReplicas is non-zero: +// {"c1,c2,c3": numReplicas1, "c4,c5": numReplicas2} func (c ConstraintsList) MarshalYAML() (interface{}, error) { // If per-replica Constraints aren't in use, marshal everything into a list // for compatibility with pre-2.0-style configs. diff --git a/pkg/featureflag/feature_flags.go b/pkg/featureflag/feature_flags.go index 420c41287b15..5a77543150e5 100644 --- a/pkg/featureflag/feature_flags.go +++ b/pkg/featureflag/feature_flags.go @@ -72,7 +72,7 @@ func CheckEnabled( } // metaFeatureDenialMetric is a metric counting the statements denied by a -//feature flag. +// feature flag. 
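An aside on the zone_yaml.go comment above: to make the two ConstraintsList encodings concrete, here is a rough sketch of the shapes they describe, using gopkg.in/yaml.v2 directly. The real MarshalYAML body is not shown in this hunk, so the exact rendering is illustrative rather than authoritative.

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // Legacy form: 0 or 1 Constraints and NumReplicas == 0 marshals as a
        // plain list of constraint strings.
        legacy, _ := yaml.Marshal([]string{"+region=us-east1", "+ssd"})
        fmt.Print(string(legacy))

        // Per-replica form: a non-zero NumReplicas marshals as a map from the
        // comma-joined constraint set to its replica count.
        perReplica, _ := yaml.Marshal(map[string]int32{"+region=us-east1,+ssd": 2})
        fmt.Print(string(perReplica))
    }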
var metaFeatureDenialMetric = metric.Metadata{ Name: "sql.feature_flag_denial", Help: "Counter of the number of statements denied by a feature flag", diff --git a/pkg/gen/docs.bzl b/pkg/gen/docs.bzl index 26d3f0de15c4..6308262b57a3 100644 --- a/pkg/gen/docs.bzl +++ b/pkg/gen/docs.bzl @@ -285,7 +285,6 @@ DOCS_SRCS = [ "//docs/generated/sql:functions.md", "//docs/generated/sql:operators.md", "//docs/generated/sql:window_functions.md", - "//docs/generated/swagger:spec.json", "//docs/generated:eventlog.md", "//docs/generated:logformats.md", "//docs/generated:logging.md", diff --git a/pkg/gen/genbzl/main.go b/pkg/gen/genbzl/main.go index b1aad473febc..f427ea8f46bc 100644 --- a/pkg/gen/genbzl/main.go +++ b/pkg/gen/genbzl/main.go @@ -76,8 +76,7 @@ func generate(outDir string) error { // The targets should be thought of as the following expression, constructed // additively in code. // -// build/...:* + //docs/...:* + //pkg/...:* - //pkg//ui/...:* - //pkg/gen/...:* -// +// build/...:* + //docs/...:* + //pkg/...:* - //pkg//ui/...:* - //pkg/gen/...:* func getQueryData() (*queryData, error) { dirs := []string{"build", "docs"} ents, err := os.ReadDir("pkg") diff --git a/pkg/geo/geo.go b/pkg/geo/geo.go index c59dff50abd9..6f250786debd 100644 --- a/pkg/geo/geo.go +++ b/pkg/geo/geo.go @@ -701,7 +701,8 @@ func AdjustGeomTSRID(t geom.T, srid geopb.SRID) { // IsLinearRingCCW returns whether a given linear ring is counter clock wise. // See 2.07 of http://www.faqs.org/faqs/graphics/algorithms-faq/. // "Find the lowest vertex (or, if there is more than one vertex with the same lowest coordinate, -// the rightmost of those vertices) and then take the cross product of the edges fore and aft of it." +// +// the rightmost of those vertices) and then take the cross product of the edges fore and aft of it." func IsLinearRingCCW(linearRing *geom.LinearRing) bool { smallestIdx := 0 smallest := linearRing.Coord(0) diff --git a/pkg/geo/geogfn/best_projection.go b/pkg/geo/geogfn/best_projection.go index 07f29e285d00..1ff7c34857c8 100644 --- a/pkg/geo/geogfn/best_projection.go +++ b/pkg/geo/geogfn/best_projection.go @@ -25,11 +25,12 @@ import ( // geometry-type projection. // // The algorithm is described by ST_Buffer/ST_Intersection documentation (paraphrased): -// It first determines the best SRID that fits the bounding box of the 2 geography objects (ST_Intersection only). -// It favors a north/south pole projection, then UTM, then LAEA for smaller zones, otherwise falling back -// to web mercator. -// If geography objects are within one half zone UTM but not the same UTM it will pick one of those. -// After the calculation is complete, it will fall back to WGS84 Geography. +// +// It first determines the best SRID that fits the bounding box of the 2 geography objects (ST_Intersection only). +// It favors a north/south pole projection, then UTM, then LAEA for smaller zones, otherwise falling back +// to web mercator. +// If geography objects are within one half zone UTM but not the same UTM it will pick one of those. +// After the calculation is complete, it will fall back to WGS84 Geography. func BestGeomProjection(boundingRect s2.Rect) (geoprojbase.Proj4Text, error) { center := boundingRect.Center() diff --git a/pkg/geo/geogfn/covers.go b/pkg/geo/geogfn/covers.go index b71fc0d8495f..d8c9953cc59f 100644 --- a/pkg/geo/geogfn/covers.go +++ b/pkg/geo/geogfn/covers.go @@ -25,19 +25,20 @@ import ( // precision for Covers will be for up to 1cm. 
// // Current limitations (which are also limitations in PostGIS): -// * POLYGON/LINESTRING only works as "contains" - if any point of the LINESTRING -// touches the boundary of the polygon, we will return false but should be true - e.g. +// +// - POLYGON/LINESTRING only works as "contains" - if any point of the LINESTRING +// touches the boundary of the polygon, we will return false but should be true - e.g. // SELECT st_covers( -// 'multipolygon(((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 1.0, 0.0 0.0)), ((1.0 0.0, 2.0 0.0, 2.0 1.0, 1.0 1.0, 1.0 0.0)))', -// 'linestring(0.0 0.0, 1.0 0.0)'::geography +// 'multipolygon(((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 1.0, 0.0 0.0)), ((1.0 0.0, 2.0 0.0, 2.0 1.0, 1.0 1.0, 1.0 0.0)))', +// 'linestring(0.0 0.0, 1.0 0.0)'::geography // ); // -// * Furthermore, LINESTRINGS that are covered in multiple POLYGONs inside -// MULTIPOLYGON but NOT within a single POLYGON in the MULTIPOLYGON -// currently return false but should be true, e.g. +// - Furthermore, LINESTRINGS that are covered in multiple POLYGONs inside +// MULTIPOLYGON but NOT within a single POLYGON in the MULTIPOLYGON +// currently return false but should be true, e.g. // SELECT st_covers( -// 'multipolygon(((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 1.0, 0.0 0.0)), ((1.0 0.0, 2.0 0.0, 2.0 1.0, 1.0 1.0, 1.0 0.0)))', -// 'linestring(0.0 0.0, 2.0 0.0)'::geography +// 'multipolygon(((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 1.0, 0.0 0.0)), ((1.0 0.0, 2.0 0.0, 2.0 1.0, 1.0 1.0, 1.0 0.0)))', +// 'linestring(0.0 0.0, 2.0 0.0)'::geography // ); func Covers(a geo.Geography, b geo.Geography) (bool, error) { if a.SRID() != b.SRID() { diff --git a/pkg/geo/geogfn/distance.go b/pkg/geo/geogfn/distance.go index 3b8019ec1cba..607217d0bfc1 100644 --- a/pkg/geo/geogfn/distance.go +++ b/pkg/geo/geogfn/distance.go @@ -187,9 +187,9 @@ func (c *s2GeodistEdgeCrosser) ChainCrossing(p geodist.Point) (bool, geodist.Poi // PostGIS evaluates the distance between spheroid regions by computing the min of // the pair-wise distance between the cross-product of the regions in A and the regions // in B, where the pair-wise distance is computed as: -// * Find the two closest points between the pairs of regions using the sphere -// for distance calculations. -// * Compute the spheroid distance between the two closest points. +// - Find the two closest points between the pairs of regions using the sphere +// for distance calculations. +// - Compute the spheroid distance between the two closest points. // // This is technically incorrect, since it is possible that the two closest points on // the spheroid are different than the two closest points on the sphere. diff --git a/pkg/geo/geoindex/geoindex.go b/pkg/geo/geoindex/geoindex.go index eb61ab4edea8..53e34515ffba 100644 --- a/pkg/geo/geoindex/geoindex.go +++ b/pkg/geo/geoindex/geoindex.go @@ -386,17 +386,17 @@ func (rc simpleCovererImpl) covering(regions []s2.Region) s2.CellUnion { // cells below c). For example, consider a portion of the cell quad-tree // below: // -// c0 -// | -// c3 -// | -// +---+---+ -// | | -// c13 c15 -// | | -// c53 +--+--+ -// | | -// c61 c64 +// c0 +// | +// c3 +// | +// +---+---+ +// | | +// c13 c15 +// | | +// c53 +--+--+ +// | | +// c61 c64 // // Shape s could have a regular covering c15, c53, where c15 has 4 child cells // c61..c64, and shape s only intersects wit c61, c64. A different shape x @@ -584,39 +584,43 @@ func coveredBy(_ context.Context, rc *s2.RegionCoverer, r []s2.Region) RPKeyExpr // The quad-trees stored in presentCells together represent a set expression. 
// This expression specifies: -// - the path for each leaf to the root of that quad-tree. The index entries -// on each such path represent the shapes that cover that leaf. Hence these -// index entries for a single path need to be unioned to give the shapes -// that cover the leaf. -// - The full expression specifies the shapes that cover all the leaves, so -// the union expressions for the paths must be intersected with each other. +// - the path for each leaf to the root of that quad-tree. The index entries +// on each such path represent the shapes that cover that leaf. Hence these +// index entries for a single path need to be unioned to give the shapes +// that cover the leaf. +// - The full expression specifies the shapes that cover all the leaves, so +// the union expressions for the paths must be intersected with each other. // // Reusing an example from earlier in this file, say the quad-tree is: -// c0 -// | -// c3 -// | -// +---+---+ -// | | -// c13 c15 -// | | -// c53 +--+--+ -// | | -// c61 c64 +// +// c0 +// | +// c3 +// | +// +---+---+ +// | | +// c13 c15 +// | | +// c53 +--+--+ +// | | +// c61 c64 // // This tree represents the following expression (where I(c) are the index // entries stored at cell c): -// (I(c64) \union I(c15) \union I(c3) \union I(c0)) \intersection -// (I(c61) \union I(c15) \union I(c3) \union I(c0)) \intersection -// (I(c53) \union I(c13) \union I(c3) \union I(c0)) +// +// (I(c64) \union I(c15) \union I(c3) \union I(c0)) \intersection +// (I(c61) \union I(c15) \union I(c3) \union I(c0)) \intersection +// (I(c53) \union I(c13) \union I(c3) \union I(c0)) +// // In this example all the union sub-expressions have the same number of terms // but that does not need to be true. // // The above expression can be factored to eliminate repetition of the // same cell. The factored expression for this example is: -// I(c0) \union I(c3) \union -// ((I(c13) \union I(c53)) \intersection -// (I(c15) \union (I(c61) \intersection I(c64))) +// +// I(c0) \union I(c3) \union +// ((I(c13) \union I(c53)) \intersection +// (I(c15) \union (I(c61) \intersection I(c64))) // // This function generates this factored expression represented in reverse // polish notation. diff --git a/pkg/geo/geomfn/affine_transforms.go b/pkg/geo/geomfn/affine_transforms.go index 3247f522e3d0..a75d71950ab8 100644 --- a/pkg/geo/geomfn/affine_transforms.go +++ b/pkg/geo/geomfn/affine_transforms.go @@ -23,16 +23,21 @@ import ( // AffineMatrix defines an affine transformation matrix for a geom object. // It is expected to be of the form: -// a b c x_off -// d e f y_off -// g h i z_off -// 0 0 0 1 +// +// a b c x_off +// d e f y_off +// g h i z_off +// 0 0 0 1 +// // Which gets applies onto a coordinate of form: -// (x y z 0)^T +// +// (x y z 0)^T +// // With the following transformation: -// x' = a*x + b*y + c*z + x_off -// y' = d*x + e*y + f*z + y_off -// z' = g*x + h*y + i*z + z_off +// +// x' = a*x + b*y + c*z + x_off +// y' = d*x + e*y + f*z + y_off +// z' = g*x + h*y + i*z + z_off type AffineMatrix [][]float64 // Affine applies a 3D affine transformation onto the given geometry. diff --git a/pkg/geo/geomfn/de9im.go b/pkg/geo/geomfn/de9im.go index 745073ed48f6..b6d296492a86 100644 --- a/pkg/geo/geomfn/de9im.go +++ b/pkg/geo/geomfn/de9im.go @@ -68,12 +68,12 @@ func MatchesDE9IM(relation string, pattern string) (bool, error) { // relationByteMatchesPatternByte matches a single byte of a DE-9IM relation // against the DE-9IM pattern. // Pattern matches are as follows: -// * '*': allow anything. 
-// * '0' / '1' / '2': match exactly. -// * 't'/'T': allow only if the relation is true. This means the relation must be -// '0' (point), '1' (line) or '2' (area) - which is the dimensionality of the -// intersection. -// * 'f'/'F': allow only if relation is also false, which is of the form 'f'/'F'. +// - '*': allow anything. +// - '0' / '1' / '2': match exactly. +// - 't'/'T': allow only if the relation is true. This means the relation must be +// '0' (point), '1' (line) or '2' (area) - which is the dimensionality of the +// intersection. +// - 'f'/'F': allow only if relation is also false, which is of the form 'f'/'F'. func relationByteMatchesPatternByte(r byte, p byte) (bool, error) { switch util.ToLowerSingleByte(p) { case '*': diff --git a/pkg/gossip/gossip.go b/pkg/gossip/gossip.go index 17e023783b9f..dd27f1d47e89 100644 --- a/pkg/gossip/gossip.go +++ b/pkg/gossip/gossip.go @@ -288,11 +288,14 @@ type Gossip struct { // expected to already contain the node ID. // // grpcServer: The server on which the new Gossip instance will register its RPC -// service. Can be nil, in which case the Gossip will not register the -// service. +// +// service. Can be nil, in which case the Gossip will not register the +// service. +// // rpcContext: The context used to connect to other nodes. Can be nil for tests -// that also specify a nil grpcServer and that plan on using the Gossip in a -// restricted way by populating it with data manually. +// +// that also specify a nil grpcServer and that plan on using the Gossip in a +// restricted way by populating it with data manually. func New( ambient log.AmbientContext, clusterID *base.ClusterIDContainer, @@ -348,11 +351,14 @@ func New( // ClusterIDContainer and NodeIDContainer internally. Used for testing. // // grpcServer: The server on which the new Gossip instance will register its RPC -// service. Can be nil, in which case the Gossip will not register the -// service. +// +// service. Can be nil, in which case the Gossip will not register the +// service. +// // rpcContext: The context used to connect to other nodes. Can be nil for tests -// that also specify a nil grpcServer and that plan on using the Gossip in a -// restricted way by populating it with data manually. +// +// that also specify a nil grpcServer and that plan on using the Gossip in a +// restricted way by populating it with data manually. func NewTest( nodeID roachpb.NodeID, rpcContext *rpc.Context, diff --git a/pkg/gossip/gossip_test.go b/pkg/gossip/gossip_test.go index a96a731d6340..51734f5458ce 100644 --- a/pkg/gossip/gossip_test.go +++ b/pkg/gossip/gossip_test.go @@ -877,7 +877,9 @@ func TestGossipPropagation(t *testing.T) { // n1: decommissioned // n2: gossip node-liveness:1 // n3: node-liveness range lease acquired (does not gossip node-liveness:1 -// record because it is unchanged) +// +// record because it is unchanged) +// // n2: restarted // - connects as gossip client to n3 // - sends a batch of gossip records to n3 diff --git a/pkg/internal/sqlsmith/sqlsmith_test.go b/pkg/internal/sqlsmith/sqlsmith_test.go index a8e2670d1ef2..45df6ddb06b3 100644 --- a/pkg/internal/sqlsmith/sqlsmith_test.go +++ b/pkg/internal/sqlsmith/sqlsmith_test.go @@ -74,9 +74,9 @@ func TestSetups(t *testing.T) { // // If this test fails, there is likely a bug in: // -// 1. sqlsmith that makes valid INSERTs impossible or very unlikely -// 2. Or rand-tables that makes it impossible or very unlikely to ever -// generate a successful INSERT +// 1. 
sqlsmith that makes valid INSERTs impossible or very unlikely +// 2. Or rand-tables that makes it impossible or very unlikely to ever +// generate a successful INSERT // // Note that there is a small but non-zero chance that this test produces a // false-negative. diff --git a/pkg/internal/sqlsmith/tlp.go b/pkg/internal/sqlsmith/tlp.go index 9a71af44fea2..66971241ca58 100644 --- a/pkg/internal/sqlsmith/tlp.go +++ b/pkg/internal/sqlsmith/tlp.go @@ -21,16 +21,16 @@ import ( // CombinedTLP returns a single SQL query that compares the results of the two // TLP queries: // -// WITH unpart AS MATERIALIZED ( -// -// ), part AS MATERIALIZED ( -// -// ), undiff AS ( -// TABLE unpart EXCEPT ALL TABLE part -// ), diff AS ( -// TABLE part EXCEPT ALL TABLE unpart -// ) -// SELECT (SELECT count(*) FROM undiff), (SELECT count(*) FROM diff) +// WITH unpart AS MATERIALIZED ( +// +// ), part AS MATERIALIZED ( +// +// ), undiff AS ( +// TABLE unpart EXCEPT ALL TABLE part +// ), diff AS ( +// TABLE part EXCEPT ALL TABLE unpart +// ) +// SELECT (SELECT count(*) FROM undiff), (SELECT count(*) FROM diff) // // This combined query can be used to check TLP equality with SQL comparison, // which will sometimes differ from TLP equality checked with string comparison. @@ -82,15 +82,15 @@ func (s *Smither) GenerateTLP() (unpartitioned, partitioned string, args []inter // // The first query returned is an unpartitioned query of the form: // -// SELECT *, p, NOT (p), (p) IS NULL, true, false, false FROM table +// SELECT *, p, NOT (p), (p) IS NULL, true, false, false FROM table // // The second query returned is a partitioned query of the form: // -// SELECT *, p, NOT (p), (p) IS NULL, p, NOT (p), (p) IS NULL FROM table WHERE (p) -// UNION ALL -// SELECT *, p, NOT (p), (p) IS NULL, NOT(p), p, (p) IS NULL FROM table WHERE NOT (p) -// UNION ALL -// SELECT *, p, NOT (p), (p) IS NULL, (p) IS NULL, (p) IS NOT NULL, (NOT(p)) IS NOT NULL FROM table WHERE (p) IS NULL +// SELECT *, p, NOT (p), (p) IS NULL, p, NOT (p), (p) IS NULL FROM table WHERE (p) +// UNION ALL +// SELECT *, p, NOT (p), (p) IS NULL, NOT(p), p, (p) IS NULL FROM table WHERE NOT (p) +// UNION ALL +// SELECT *, p, NOT (p), (p) IS NULL, (p) IS NULL, (p) IS NOT NULL, (NOT(p)) IS NOT NULL FROM table WHERE (p) IS NULL // // The last 3 boolean columns serve as a correctness check. The unpartitioned // query projects true, false, false at the end so that the partitioned queries @@ -253,15 +253,15 @@ func (s *Smither) generateOuterJoinTLP() (unpartitioned, partitioned string) { // // The first query returned is an unpartitioned query of the form: // -// SELECT * FROM table1 JOIN table2 ON TRUE +// SELECT * FROM table1 JOIN table2 ON TRUE // // The second query returned is a partitioned query of the form: // -// SELECT * FROM table1 JOIN table2 ON (p) -// UNION ALL -// SELECT * FROM table1 JOIN table2 ON NOT (p) -// UNION ALL -// SELECT * FROM table1 JOIN table2 ON (p) IS NULL +// SELECT * FROM table1 JOIN table2 ON (p) +// UNION ALL +// SELECT * FROM table1 JOIN table2 ON NOT (p) +// UNION ALL +// SELECT * FROM table1 JOIN table2 ON (p) IS NULL // // From the first query, we have a CROSS JOIN of the two tables (JOIN ON TRUE). 
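As a rough sketch of how the WHERE-style partitioning above can be assembled (omitting the extra boolean correctness columns that GenerateTLP projects), the helpers below build the three-way UNION ALL query and the WITH ... EXCEPT ALL comparison wrapper for a hypothetical table t and predicate a > 10; they only mirror the query shapes shown in the comments, not the smither itself.

package main

import "fmt"

// partitionedQuery builds the three-way union for a WHERE TLP check: the
// partitions p, NOT p and p IS NULL must together return exactly the rows of
// the unpartitioned query.
func partitionedQuery(table, pred string) string {
	return fmt.Sprintf(
		"SELECT * FROM %[1]s WHERE (%[2]s) UNION ALL "+
			"SELECT * FROM %[1]s WHERE NOT (%[2]s) UNION ALL "+
			"SELECT * FROM %[1]s WHERE (%[2]s) IS NULL",
		table, pred)
}

// combined wraps the two queries in the WITH ... EXCEPT ALL form shown above,
// so equality can be checked with one SQL query returning two counts (both
// should be zero when TLP holds).
func combined(unpart, part string) string {
	return fmt.Sprintf(
		"WITH unpart AS MATERIALIZED (%s), part AS MATERIALIZED (%s), "+
			"undiff AS (TABLE unpart EXCEPT ALL TABLE part), "+
			"diff AS (TABLE part EXCEPT ALL TABLE unpart) "+
			"SELECT (SELECT count(*) FROM undiff), (SELECT count(*) FROM diff)",
		unpart, part)
}

func main() {
	unpart := "SELECT * FROM t"
	part := partitionedQuery("t", "a > 10")
	fmt.Println(combined(unpart, part))
}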
// Recall our TLP logical guarantee that a given predicate p always evaluates to @@ -326,23 +326,23 @@ func (s *Smither) generateInnerJoinTLP() (unpartitioned, partitioned string) { // // The first query returned is an unpartitioned query of the form: // -// SELECT MAX(first) FROM (SELECT * FROM table) table(first) +// SELECT MAX(first) FROM (SELECT * FROM table) table(first) // // The second query returned is a partitioned query of the form: // -// SELECT MAX(agg) FROM ( -// SELECT MAX(first) AS agg FROM ( -// SELECT * FROM table WHERE p -// ) table(first) -// UNION ALL -// SELECT MAX(first) AS agg FROM ( -// SELECT * FROM table WHERE NOT (p) -// ) table(first) -// UNION ALL -// SELECT MAX(first) AS agg FROM ( -// SELECT * FROM table WHERE (p) IS NULL -// ) table(first) -// ) +// SELECT MAX(agg) FROM ( +// SELECT MAX(first) AS agg FROM ( +// SELECT * FROM table WHERE p +// ) table(first) +// UNION ALL +// SELECT MAX(first) AS agg FROM ( +// SELECT * FROM table WHERE NOT (p) +// ) table(first) +// UNION ALL +// SELECT MAX(first) AS agg FROM ( +// SELECT * FROM table WHERE (p) IS NULL +// ) table(first) +// ) // // Note that all instances of MAX can be replaced with MIN to get the // corresponding MIN version of the queries. For the COUNT version, we @@ -410,13 +410,13 @@ func (s *Smither) generateAggregationTLP() (unpartitioned, partitioned string) { // // The first query returned is an unpartitioned query of the form: // -// SELECT DISTINCT {cols...} FROM table +// SELECT DISTINCT {cols...} FROM table // // The second query returned is a partitioned query of the form: // -// SELECT DISTINCT {cols...} FROM table WHERE (p) UNION -// SELECT DISTINCT {cols...} FROM table WHERE NOT (p) UNION -// SELECT DISTINCT {cols...} FROM table WHERE (p) IS NULL +// SELECT DISTINCT {cols...} FROM table WHERE (p) UNION +// SELECT DISTINCT {cols...} FROM table WHERE NOT (p) UNION +// SELECT DISTINCT {cols...} FROM table WHERE (p) IS NULL // // If the resulting values of the two queries are not equal, there is a logical // bug. diff --git a/pkg/jobs/config.go b/pkg/jobs/config.go index 6055d58b749b..3bfbc7c439d2 100644 --- a/pkg/jobs/config.go +++ b/pkg/jobs/config.go @@ -184,18 +184,18 @@ func jitter(dur time.Duration) time.Duration { // using lastRun, which is updated in onExecute(). // // Common usage pattern: -// lc, cleanup := makeLoopController(...) -// defer cleanup() -// for { -// select { -// case <- lc.update: -// lc.onUpdate() or lc.onUpdateWithBound() -// case <- lc.timer.C: -// executeJob() -// lc.onExecute() or lc.onExecuteWithBound -// } -// } // +// lc, cleanup := makeLoopController(...) +// defer cleanup() +// for { +// select { +// case <- lc.update: +// lc.onUpdate() or lc.onUpdateWithBound() +// case <- lc.timer.C: +// executeJob() +// lc.onExecute() or lc.onExecuteWithBound +// } +// } type loopController struct { timer *timeutil.Timer lastRun time.Time diff --git a/pkg/jobs/registry.go b/pkg/jobs/registry.go index 0ed2d228a005..af47616eea7c 100644 --- a/pkg/jobs/registry.go +++ b/pkg/jobs/registry.go @@ -1071,7 +1071,6 @@ func (r *Registry) Unpause(ctx context.Context, txn *kv.Txn, id jobspb.JobID) er // canceled. // // Resumers are created through registered Constructor functions. -// type Resumer interface { // Resume is called when a job is started or resumed. execCtx is a sql.JobExecCtx. Resume(ctx context.Context, execCtx interface{}) error @@ -1093,8 +1092,8 @@ type RegisterOption func(opts *registerOptions) // Storage I/O costs (i.e. 
from reads/writes) from tenant accounting, based on // this principle: // -// Jobs that are not triggered by user actions should be exempted from cost -// control. +// Jobs that are not triggered by user actions should be exempted from cost +// control. // // For example, SQL stats compaction, span reconciler, and long-running // migration jobs are not triggered by user actions, and so should be exempted. diff --git a/pkg/jobs/update.go b/pkg/jobs/update.go index 28122d16129c..575e5cae4e7b 100644 --- a/pkg/jobs/update.go +++ b/pkg/jobs/update.go @@ -124,14 +124,14 @@ func UpdateHighwaterProgressed(highWater hlc.Timestamp, md JobMetadata, ju *JobU // // Sample usage: // -// err := j.Update(ctx, func(_ *client.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { -// if md.Status != StatusRunning { -// return errors.New("job no longer running") -// } -// md.UpdateStatus(StatusPaused) -// // -// md.UpdatePayload(md.Payload) -// } +// err := j.Update(ctx, func(_ *client.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { +// if md.Status != StatusRunning { +// return errors.New("job no longer running") +// } +// md.UpdateStatus(StatusPaused) +// // +// md.UpdatePayload(md.Payload) +// } // // Note that there are various convenience wrappers (like FractionProgressed) // defined in jobs.go. diff --git a/pkg/keys/doc.go b/pkg/keys/doc.go index 6a24c5a14954..8f96a1d007c1 100644 --- a/pkg/keys/doc.go +++ b/pkg/keys/doc.go @@ -21,42 +21,41 @@ // // This is the ten-thousand foot view of the keyspace: // -// +------------------+ -// | (empty) | /Min -// | \x01... | /Local ---------------------+ -// | | | -// | ... | | local keys -// | | | -// | | ---------------------+ -// | | ---------------------+ -// | \x02... | /Meta1 ----+ | -// | \x03... | /Meta2 | | -// | \x04... | /System | | -// | | | system keys | -// | ... | | | -// | | ----+ | -// | \x89... | /Table/1 ----+ | -// | \x8a... | /Table/2 | | -// | | | system tenant | -// | ... | | | global keys -// | | ----+ | -// | \xfe\x8a\x89... | /Tenant/2/Table/1 ----+ | -// | \xfe\x8a\x8a... | /Tenant/2/Table/2 | | -// | | | tenant 2 | -// | ... | | | -// | | ----+ | -// | \xfe... | /Tenant/... ----+ | -// | \xfe... | | | -// | | | tenant ... | -// | ... | | | -// | | ----+ | -// | \xff\xff | /Max ---------------------+ -// +------------------+ +// +------------------+ +// | (empty) | /Min +// | \x01... | /Local ---------------------+ +// | | | +// | ... | | local keys +// | | | +// | | ---------------------+ +// | | ---------------------+ +// | \x02... | /Meta1 ----+ | +// | \x03... | /Meta2 | | +// | \x04... | /System | | +// | | | system keys | +// | ... | | | +// | | ----+ | +// | \x89... | /Table/1 ----+ | +// | \x8a... | /Table/2 | | +// | | | system tenant | +// | ... | | | global keys +// | | ----+ | +// | \xfe\x8a\x89... | /Tenant/2/Table/1 ----+ | +// | \xfe\x8a\x8a... | /Tenant/2/Table/2 | | +// | | | tenant 2 | +// | ... | | | +// | | ----+ | +// | \xfe... | /Tenant/... ----+ | +// | \xfe... | | | +// | | | tenant ... | +// | ... | | | +// | | ----+ | +// | \xff\xff | /Max ---------------------+ +// +------------------+ // // When keys are pretty printed, the logical name to the right of the table is // shown instead of the raw byte sequence. // -// // 1. Key Ranges // // The keyspace is divided into contiguous, non-overlapping chunks called @@ -67,20 +66,19 @@ // exist over the "resolved" keyspace, refer to the "Key Addressing" section // below for more details. // -// // 2. Local vs. 
Global Keys // // There are broadly two types of keys, "local" and "global": // -// (i) Local keys, such as store- and range-specific metadata, are keys that -// must be physically collocated with the store and/or ranges they refer to but -// also logically separated so that they do not pollute the user key space. -// This is further elaborated on in the "Key Addressing" section below. Local -// data also includes data "local" to a node, such as the store metadata and -// the raft log, which is where the name originated. +// (i) Local keys, such as store- and range-specific metadata, are keys that +// must be physically collocated with the store and/or ranges they refer to but +// also logically separated so that they do not pollute the user key space. +// This is further elaborated on in the "Key Addressing" section below. Local +// data also includes data "local" to a node, such as the store metadata and +// the raft log, which is where the name originated. // -// (ii) Non-local keys (for e.g. meta1, meta2, system, and SQL keys) are -// collectively referred to as "global" keys. +// (ii) Non-local keys (for e.g. meta1, meta2, system, and SQL keys) are +// collectively referred to as "global" keys. // // NB: The empty key (/Min) is a special case. No data is stored there, but it // is used as the start key of the first range descriptor and as the starting @@ -89,7 +87,6 @@ // (Check `keymap` below for a more precise breakdown of the local and global // keyspace.) // -// // 2. Key Addressing // // We also have this concept of the "address" for a key. Keys get "resolved" @@ -114,7 +111,6 @@ // collocation. By being able to logically sort the range descriptor key next to // the range itself, we're able to collocate the two. // -// // 3. (replicated) Range-ID local keys vs. Range local keys // // Deciding between replicated range-ID local keys and range local keys is not diff --git a/pkg/keys/printer.go b/pkg/keys/printer.go index 9fe24df37799..a4782cba27da 100644 --- a/pkg/keys/printer.go +++ b/pkg/keys/printer.go @@ -714,9 +714,13 @@ func init() { // PrettyPrintRange pretty prints a compact representation of a key range. The // output is of the form: -// commonPrefix{remainingStart-remainingEnd} +// +// commonPrefix{remainingStart-remainingEnd} +// // If the end key is empty, the output is of the form: -// start +// +// start +// // It prints at most maxChars, truncating components as needed. See // TestPrettyPrintRange for some examples. func PrettyPrintRange(start, end roachpb.Key, maxChars int) string { diff --git a/pkg/kv/batch.go b/pkg/kv/batch.go index 97167d687f91..06c21ca2a1d5 100644 --- a/pkg/kv/batch.go +++ b/pkg/kv/batch.go @@ -367,8 +367,8 @@ func (b *Batch) get(key interface{}, forUpdate bool) { // Get retrieves the value for a key. A new result will be appended to the batch // which will contain a single row. // -// r, err := db.Get("a") -// // string(r.Rows[0].Key) == "a" +// r, err := db.Get("a") +// // string(r.Rows[0].Key) == "a" // // key can be either a byte slice or a string. func (b *Batch) Get(key interface{}) { @@ -379,8 +379,8 @@ func (b *Batch) Get(key interface{}) { // is acquired on the key, if it exists. A new result will be appended to the // batch which will contain a single row. // -// r, err := db.GetForUpdate("a") -// // string(r.Rows[0].Key) == "a" +// r, err := db.GetForUpdate("a") +// // string(r.Rows[0].Key) == "a" // // key can be either a byte slice or a string. 
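A minimal usage sketch of the Batch API documented above, assuming the conventional kv.Batch / DB.Run / Results shape from the package's documented usage; error handling is kept to a minimum and the key names are arbitrary.

package example

import (
	"context"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/kv"
)

// readPair reads two keys in one round trip: each Get/GetForUpdate appends a
// Result containing a single row, and Run sends the whole batch.
func readPair(ctx context.Context, db *kv.DB) error {
	b := &kv.Batch{}
	b.Get("a")
	b.GetForUpdate("b") // also acquires an unreplicated, exclusive lock on "b"
	if err := db.Run(ctx, b); err != nil {
		return err
	}
	for _, res := range b.Results {
		for _, row := range res.Rows {
			fmt.Printf("%s => %v\n", row.Key, row.Value)
		}
	}
	return nil
}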
func (b *Batch) GetForUpdate(key interface{}) { diff --git a/pkg/kv/client_test.go b/pkg/kv/client_test.go index 8ca5f859a2cb..59335438b583 100644 --- a/pkg/kv/client_test.go +++ b/pkg/kv/client_test.go @@ -8,7 +8,9 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -/* Package client_test tests clients against a fully-instantiated +/* + Package client_test tests clients against a fully-instantiated + cockroach cluster (a single node, but bootstrapped, gossiped, etc.). */ package kv_test @@ -48,7 +50,8 @@ var testUser = username.TestUser // checkKVs verifies that a KeyValue slice contains the expected keys and // values. The values can be either integers or strings; the expected results // are passed as alternating keys and values, e.g: -// checkScanResult(t, result, key1, val1, key2, val2) +// +// checkScanResult(t, result, key1, val1, key2, val2) func checkKVs(t *testing.T, kvs []kv.KeyValue, expected ...interface{}) { t.Helper() expLen := len(expected) / 2 diff --git a/pkg/kv/db.go b/pkg/kv/db.go index e1ebbd3083e8..c45d71fa99bd 100644 --- a/pkg/kv/db.go +++ b/pkg/kv/db.go @@ -317,8 +317,8 @@ func NewDBWithContext( // Get retrieves the value for a key, returning the retrieved key/value or an // error. It is not considered an error for the key not to exist. // -// r, err := db.Get("a") -// // string(r.Key) == "a" +// r, err := db.Get("a") +// // string(r.Key) == "a" // // key can be either a byte slice or a string. func (db *DB) Get(ctx context.Context, key interface{}) (KeyValue, error) { @@ -331,8 +331,8 @@ func (db *DB) Get(ctx context.Context, key interface{}) (KeyValue, error) { // or an error. An unreplicated, exclusive lock is acquired on the key, if it // exists. It is not considered an error for the key not to exist. // -// r, err := db.GetForUpdate("a") -// // string(r.Key) == "a" +// r, err := db.GetForUpdate("a") +// // string(r.Key) == "a" // // key can be either a byte slice or a string. func (db *DB) GetForUpdate(ctx context.Context, key interface{}) (KeyValue, error) { @@ -888,13 +888,14 @@ func (db *DB) NewTxn(ctx context.Context, debugName string) *Txn { // use TxnWithAdmissionControl. // // For example: -// err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { -// if kv, err := txn.Get(ctx, key); err != nil { -// return err -// } -// // ... -// return nil -// }) +// +// err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { +// if kv, err := txn.Get(ctx, key); err != nil { +// return err +// } +// // ... +// return nil +// }) // // Note that once the transaction encounters a retryable error, the txn object // is marked as poisoned and all future ops fail fast until the retry. The diff --git a/pkg/kv/doc.go b/pkg/kv/doc.go index 146b6f899011..bc60ee57ae79 100644 --- a/pkg/kv/doc.go +++ b/pkg/kv/doc.go @@ -16,7 +16,7 @@ http://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-v Package kv provides clients for accessing the various externally-facing Cockroach database endpoints. -DB Client +# DB Client The DB client is a fully-featured client of Cockroach's key-value database. 
It provides a simple, synchronous interface well-suited to parallel updates and diff --git a/pkg/kv/kvclient/kvcoord/batch.go b/pkg/kv/kvclient/kvcoord/batch.go index 100721d0701f..e36ca77de358 100644 --- a/pkg/kv/kvclient/kvcoord/batch.go +++ b/pkg/kv/kvclient/kvcoord/batch.go @@ -29,38 +29,38 @@ import ( // // It is designed to be used roughly as follows: // -// rs := keys.Range(requests) -// ri.Seek(scanDir, rs.Key) -// if !ri.NeedAnother(rs) { -// // All requests fit within a single range, don't use the helper. -// ... -// } -// helper := NewBatchTruncationHelper(scanDir, requests) -// for ri.Valid() { -// curRangeRS := rs.Intersect(ri.Token().Desc()) -// curRangeReqs, positions, seekKey := helper.Truncate(curRangeRS) -// // Process curRangeReqs that touch a single range and then use positions -// // to reassemble the result. -// ... -// ri.Seek(scanDir, seekKey) -// } +// rs := keys.Range(requests) +// ri.Seek(scanDir, rs.Key) +// if !ri.NeedAnother(rs) { +// // All requests fit within a single range, don't use the helper. +// ... +// } +// helper := NewBatchTruncationHelper(scanDir, requests) +// for ri.Valid() { +// curRangeRS := rs.Intersect(ri.Token().Desc()) +// curRangeReqs, positions, seekKey := helper.Truncate(curRangeRS) +// // Process curRangeReqs that touch a single range and then use positions +// // to reassemble the result. +// ... +// ri.Seek(scanDir, seekKey) +// } // // The helper utilizes two different strategies depending on whether the // requests use local keys or not: // -// - a "legacy" strategy is used when requests use local keys. This strategy -// utilizes "legacy" methods that operate on the original requests without -// keeping any additional bookkeeping. In particular, it leads to truncating -// already processed requests as well as to iterating over the fully processed -// requests when searching for the next seek key. +// - a "legacy" strategy is used when requests use local keys. This strategy +// utilizes "legacy" methods that operate on the original requests without +// keeping any additional bookkeeping. In particular, it leads to truncating +// already processed requests as well as to iterating over the fully processed +// requests when searching for the next seek key. // -// - an "optimized" strategy is used when requests only use global keys. -// Although this strategy has the same worst-case complexity of O(N * R) as -// the "legacy" strategy (where N is the number of requests, R is the number -// of ranges that all requests fit int, the worst-case is achieved when all -// requests are range-spanning and each request spans all R ranges), in -// practice it is much faster. See the comments on truncateAsc() and -// truncateDesc() for the details. +// - an "optimized" strategy is used when requests only use global keys. +// Although this strategy has the same worst-case complexity of O(N * R) as +// the "legacy" strategy (where N is the number of requests, R is the number +// of ranges that all requests fit int, the worst-case is achieved when all +// requests are range-spanning and each request spans all R ranges), in +// practice it is much faster. See the comments on truncateAsc() and +// truncateDesc() for the details. 
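The following toy version of the optimized strategy (sort once by start key, then clamp only the prefix of requests that can still overlap the current range) is a standalone sketch, not the BatchTruncationHelper itself: it re-examines already processed requests instead of tracking a start index, models Get(k) as the span [k, k.Next()), and breaks start-key ties by end key so its output lines up with the worked example below, reproducing the positions [6 1 5], [6 5 3 2] and [2 4 0].

package main

import (
	"fmt"
	"sort"
)

// A toy request: a [start, end) span plus its original position in the batch.
type req struct {
	start, end string
	pos        int
}

// truncate returns the requests overlapping the range [rsStart, rsEnd),
// clamped to that range; positions refer to the original batch order.
func truncate(sorted []req, rsStart, rsEnd string) (truncated []req, positions []int) {
	for _, r := range sorted {
		if r.start >= rsEnd {
			// Requests are sorted by start key, so nothing later can overlap.
			break
		}
		if r.end <= rsStart {
			continue // entirely before this range
		}
		t := r
		if t.start < rsStart {
			t.start = rsStart
		}
		if t.end > rsEnd {
			t.end = rsEnd
		}
		truncated = append(truncated, t)
		positions = append(positions, r.pos)
	}
	return truncated, positions
}

func main() {
	// The seven requests from the worked example, in original batch order.
	reqs := []req{
		{"i", "l", 0}, {"d", "d\x00", 1}, {"h", "k", 2}, {"g", "i", 3},
		{"i", "i\x00", 4}, {"d", "f", 5}, {"b", "h", 6},
	}
	sort.Slice(reqs, func(i, j int) bool {
		if reqs[i].start != reqs[j].start {
			return reqs[i].start < reqs[j].start
		}
		return reqs[i].end < reqs[j].end
	})
	for _, rng := range [][2]string{{"a", "e"}, {"e", "i"}, {"i", "m"}} {
		_, pos := truncate(reqs, rng[0], rng[1])
		fmt.Printf("range [%s,%s): positions %v\n", rng[0], rng[1], pos)
	}
}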
// // The gist of the optimized strategy is sorting all of the requests according // to the keys upfront and then, on each truncation iteration, examining only a @@ -328,9 +328,9 @@ func (h *BatchTruncationHelper) MemUsage() int64 { // // For example, if // -// reqs = Put[a], Put[c], Put[b], -// rs = [a,bb], -// BatchTruncationHelper.Init(Ascending, reqs) +// reqs = Put[a], Put[c], Put[b], +// rs = [a,bb], +// BatchTruncationHelper.Init(Ascending, reqs) // // then BatchTruncationHelper.Truncate(rs) returns (Put[a], Put[b]), positions // [0,2] as well as seekKey 'c'. @@ -340,9 +340,9 @@ func (h *BatchTruncationHelper) MemUsage() int64 { // Init(). // // NOTE: it is assumed that -// 1. Truncate has been called on the previous ranges that intersect with -// keys.Range(reqs); -// 2. rs is intersected with the current range boundaries. +// 1. Truncate has been called on the previous ranges that intersect with +// keys.Range(reqs); +// 2. rs is intersected with the current range boundaries. func (h *BatchTruncationHelper) Truncate( rs roachpb.RSpan, ) ([]roachpb.RequestUnion, []int, roachpb.RKey, error) { @@ -397,18 +397,18 @@ func (h *BatchTruncationHelper) Truncate( // // Let's go through an example. Say we have seven original requests: // -// requests : Scan(i, l), Get(d), Scan(h, k), Scan(g, i), Get(i), Scan(d, f), Scan(b, h) -// positions: 0 1 2 3 4 5 6 +// requests : Scan(i, l), Get(d), Scan(h, k), Scan(g, i), Get(i), Scan(d, f), Scan(b, h) +// positions: 0 1 2 3 4 5 6 // // as well three ranges to iterate over: // -// ranges: range[a, e), range[e, i), range[i, m). +// ranges: range[a, e), range[e, i), range[i, m). // // In Init(), we have reordered the requests according to their start keys: // -// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) -// positions: 6 1 5 3 2 4 0 -// headers : [b, h) [d) [d, f) [g, i) [h, k) [i) [i, l) +// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) +// positions: 6 1 5 3 2 4 0 +// headers : [b, h) [d) [d, f) [g, i) [h, k) [i) [i, l) // // On the first call to Truncate(), we're using the range [a, e). We only need // to look at the first four requests since the fourth request starts after the @@ -418,12 +418,14 @@ func (h *BatchTruncationHelper) Truncate( // mark the 2nd request Get(d) as fully processed. // // The first call prepares -// truncReqs = [Scan(b, e), Get(d), Scan(d, e)], positions = [6, 1, 5] +// +// truncReqs = [Scan(b, e), Get(d), Scan(d, e)], positions = [6, 1, 5] +// // and the internal state is now // -// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) -// positions: 6 -1 5 3 2 4 0 -// headers : [e, h) [e, f) [g, i) [h, k) [i) [i, l) +// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) +// positions: 6 -1 5 3 2 4 0 +// headers : [e, h) [e, f) [g, i) [h, k) [i) [i, l) // // Then the optimized next() function determines the seekKey as 'e' and keeps // the startIdx at 0. @@ -437,12 +439,14 @@ func (h *BatchTruncationHelper) Truncate( // 3rd, and the 4th requests as fully processed. 
// // The second call prepares -// truncReqs = [Scan(e, h), Scan(e, f), Scan(g, i), Scan(h, i)], positions = [6, 5, 3, 2] +// +// truncReqs = [Scan(e, h), Scan(e, f), Scan(g, i), Scan(h, i)], positions = [6, 5, 3, 2] +// // and the internal state is now // -// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) -// positions: -1 -1 -1 -1 2 4 0 -// headers : [i, k) [i) [i, l) +// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) +// positions: -1 -1 -1 -1 2 4 0 +// headers : [i, k) [i) [i, l) // // Then the optimized next() function determines the seekKey as 'i' and sets // the startIdx at 4 (meaning that all first four requests have been fully @@ -454,12 +458,14 @@ func (h *BatchTruncationHelper) Truncate( // value and mark all of them as processed. // // The third call prepares -// truncReqs = [Scan(i, k), Get(i), Scan(i, l)], positions = [2, 4, 0] +// +// truncReqs = [Scan(i, k), Get(i), Scan(i, l)], positions = [2, 4, 0] +// // and the internal state is now // -// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) -// positions: -1 -1 -1 -1 -1 -1 -1 -// headers : +// requests : Scan(b, h), Get(d), Scan(d, f), Scan(g, i), Scan(h, k), Get(i), Scan(i, l) +// positions: -1 -1 -1 -1 -1 -1 -1 +// headers : // // Then the optimized next() function determines the seekKey as KeyMax and sets // the startIdx at 7 (meaning that all requests have been fully processed), and @@ -552,19 +558,19 @@ func (h *BatchTruncationHelper) truncateAsc( // // Let's go through an example. Say we have seven original requests: // -// requests : Scan(i, l), Get(d), Scan(h, k), Scan(g, i), Get(i), Scan(d, f), Scan(b, h) -// positions: 0 1 2 3 4 5 6 +// requests : Scan(i, l), Get(d), Scan(h, k), Scan(g, i), Get(i), Scan(d, f), Scan(b, h) +// positions: 0 1 2 3 4 5 6 // // as well three ranges to iterate over: // -// ranges: range[i, m), range[e, i), range[a, e). +// ranges: range[i, m), range[e, i), range[a, e). // // In Init(), we have reordered the requests according to their end keys with // the descending direction (below, i' denotes Key("i").Next()): // -// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) -// positions: 0 2 4 3 6 5 1 -// headers : [i, l) [h, k) [i, i') [g, i) [b, h) [d, f) [d, d') +// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) +// positions: 0 2 4 3 6 5 1 +// headers : [i, l) [h, k) [i, i') [g, i) [b, h) [d, f) [d, d') // // On the first call to Truncate(), we're using the range [i, m). We only need // to look at the first four requests since the fourth request ends before the @@ -574,12 +580,14 @@ func (h *BatchTruncationHelper) truncateAsc( // mark the 1st and the 3rd requests as fully processed. // // The first call prepares -// truncReqs = [Scan(i, l), Scan(i, k), Get(i)], positions = [0, 2, 4] +// +// truncReqs = [Scan(i, l), Scan(i, k), Get(i)], positions = [0, 2, 4] +// // and the internal state is now // -// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) -// positions: -1 2 -1 3 6 5 1 -// headers : [h, i) [g, i) [b, h) [d, f) [d, d') +// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) +// positions: -1 2 -1 3 6 5 1 +// headers : [h, i) [g, i) [b, h) [d, f) [d, d') // // Then the optimized prev() function determines the seekKey as 'i' and moves // the startIdx to 1. 
@@ -592,12 +600,14 @@ func (h *BatchTruncationHelper) truncateAsc( // mark the 2nd and the 4th requests as fully processed. // // The second call prepares -// truncReqs = [Scan(h, i), Scan(g, i), Scan(e, h), Scan(e, f)], positions = [2, 3, 6, 5] +// +// truncReqs = [Scan(h, i), Scan(g, i), Scan(e, h), Scan(e, f)], positions = [2, 3, 6, 5] +// // and the internal state is now // -// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) -// positions: -1 -1 -1 -1 6 5 1 -// headers : [b, e) [d, e) [d, d') +// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) +// positions: -1 -1 -1 -1 6 5 1 +// headers : [b, e) [d, e) [d, d') // // Then the optimized prev() function determines the seekKey as 'e' and sets // the startIdx at 4 (meaning that all first four requests have been fully @@ -609,12 +619,14 @@ func (h *BatchTruncationHelper) truncateAsc( // value and mark all of them as processed. // // The third call prepares -// truncReqs = [Scan(b, e), Scan(d, e), Get(d)], positions = [6, 5, 1] +// +// truncReqs = [Scan(b, e), Scan(d, e), Get(d)], positions = [6, 5, 1] +// // and the internal state is now // -// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) -// positions: -1 -1 -1 -1 -1 -1 -1 -// headers : +// requests : Scan(i, l), Scan(h, k), Get(i), Scan(g, i), Scan(b, h), Scan(d, f), Get(d) +// positions: -1 -1 -1 -1 -1 -1 -1 +// headers : // // Then the optimized prev() function determines the seekKey as KeyMin and sets // the startIdx at 7 (meaning that all requests have been fully processed), and @@ -1014,11 +1026,11 @@ func (h *orderRestorationHelper) memUsage() int64 { // Let's go through a quick example. Say we have five original requests and the // following setup: // -// truncReqs = [Scan(a, c), Get(b), Scan(c, d)], positions = [3, 0, 4] +// truncReqs = [Scan(a, c), Get(b), Scan(c, d)], positions = [3, 0, 4] // // We first populate the found map: // -// found = [1, -1, -1, 0, 2] +// found = [1, -1, -1, 0, 2] // // meaning that requests at positions 0, 3, 4 are present in truncReqs. Then we // iterate over the found map, and for all non-negative found values, we include diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_canceler.go b/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_canceler.go index d08a9497a050..d42b8b188c11 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_canceler.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_canceler.go @@ -25,8 +25,6 @@ import ( // // However, issues[^1] at the KV layer could prevent this. // -// [^1]: https://github.com/cockroachdb/cockroach/issues/86818 -// // The canceler is notified via ping() whenever the associated RangeFeed receives an event. // Should ping() not be called for the configured threshold duration, the provided cancel // function will be invoked. @@ -42,6 +40,8 @@ import ( // The canceler detects changes to the configured threshold duration on each call // to ping(), i.e. in the common case of no stuck rangefeeds, it will ~immediately // pick up the new value and apply it. 
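A condensed illustration of the ping/threshold/cancel contract described above; this is a generic watchdog, not the stuckRangeFeedCanceler type that follows, and the 50ms threshold exists only for the demo.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// watchdog cancels a context if ping() is not called within threshold().
type watchdog struct {
	mu        sync.Mutex
	timer     *time.Timer
	threshold func() time.Duration
}

func newWatchdog(threshold func() time.Duration, cancel context.CancelFunc) *watchdog {
	w := &watchdog{threshold: threshold}
	w.timer = time.AfterFunc(threshold(), cancel)
	return w
}

// ping resets the countdown, re-reading the threshold so configuration
// changes take effect on the next event.
func (w *watchdog) ping() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.timer.Stop()
	w.timer.Reset(w.threshold())
}

func (w *watchdog) stop() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.timer.Stop()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	w := newWatchdog(func() time.Duration { return 50 * time.Millisecond }, cancel)
	defer w.stop()
	w.ping() // pretend a rangefeed event arrived
	<-ctx.Done()
	fmt.Println("canceled: no events within the threshold")
}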
+// +// [^1]: https://github.com/cockroachdb/cockroach/issues/86818 type stuckRangeFeedCanceler struct { threshold func() time.Duration cancel context.CancelFunc diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go index 716639ec20fe..c1d1ef4a0a58 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go @@ -146,12 +146,17 @@ type checkOptions struct { // // Args: // expSatisfied: A set of indexes into spans representing the scans that -// have been completed and don't need a ResumeSpan. For these scans, having no -// results and also no resume span is acceptable by this function. +// +// have been completed and don't need a ResumeSpan. For these scans, having no +// results and also no resume span is acceptable by this function. +// // resultsMode: Specifies how strict the result checking is supposed to be. -// expCount +// +// expCount +// // expCount: If resultsMode == AcceptPrefix, this is the total number of -// expected results. Ignored for resultsMode == Strict. +// +// expected results. Ignored for resultsMode == Strict. func checkSpanResults( t *testing.T, spans [][]string, results []kv.Result, expResults [][]string, opt checkOptions, ) { @@ -3232,15 +3237,15 @@ func TestTxnCoordSenderRetries(t *testing.T) { // EndTxn batch and a STAGING txn record written by a newer attempt of that // batch. // Namely, the scenario is as follows: -// 1. client sends CPut(a) + CPut(b) + EndTxn. The CPut(a) is split by the -// DistSender from the rest. Note that the parallel commit mechanism is in -// effect here. -// 2. One of the two sides gets a WriteTooOldError, the other succeeds. -// The client needs to refresh. -// 3. The refresh succeeds. -// 4. The client resends the whole batch (note that we don't keep track of the -// previous partial success). -// 5. The batch is split again, and one of the two sides fails. +// 1. client sends CPut(a) + CPut(b) + EndTxn. The CPut(a) is split by the +// DistSender from the rest. Note that the parallel commit mechanism is in +// effect here. +// 2. One of the two sides gets a WriteTooOldError, the other succeeds. +// The client needs to refresh. +// 3. The refresh succeeds. +// 4. The client resends the whole batch (note that we don't keep track of the +// previous partial success). +// 5. The batch is split again, and one of the two sides fails. // // This tests checks that, for the different combinations of failures across the // two attempts of the request, the transaction is not erroneously considered to @@ -3416,17 +3421,17 @@ func TestTxnCoordSenderRetriesAcrossEndTxn(t *testing.T) { // above the previous time when they've been refreshed, not from the // transaction's original read timestamp. To wit, the following scenario should // NOT result in a failed refresh: -// - txn starts at ts 100 -// - someone else writes "a" @ 200 -// - txn attempts to write "a" and is pushed to (200,1). The refresh succeeds. -// - txn reads something that has a value in [100,200]. For example, "a", which -// it just wrote. -// - someone else writes "b" @ 300 -// - txn attempts to write "b" and is pushed to (300,1). This refresh must also -// succeed. If this Refresh request would check for values in the range -// [100-300], it would fail (as it would find a@200). But since it only checks -// for values in the range [200-300] (i.e. values written beyond the timestamp -// that was refreshed before), we're good. 
+// - txn starts at ts 100 +// - someone else writes "a" @ 200 +// - txn attempts to write "a" and is pushed to (200,1). The refresh succeeds. +// - txn reads something that has a value in [100,200]. For example, "a", which +// it just wrote. +// - someone else writes "b" @ 300 +// - txn attempts to write "b" and is pushed to (300,1). This refresh must also +// succeed. If this Refresh request would check for values in the range +// [100-300], it would fail (as it would find a@200). But since it only checks +// for values in the range [200-300] (i.e. values written beyond the timestamp +// that was refreshed before), we're good. func TestRefreshNoFalsePositive(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvclient/kvcoord/split_test.go b/pkg/kv/kvclient/kvcoord/split_test.go index 573e8dcee4c0..b48a189dfa38 100644 --- a/pkg/kv/kvclient/kvcoord/split_test.go +++ b/pkg/kv/kvclient/kvcoord/split_test.go @@ -262,9 +262,9 @@ func TestRangeSplitsWithSameKeyTwice(t *testing.T) { // TestSplitStickyBit checks that the sticky bit is set when performing a manual // split. There are two cases to consider: -// 1. Range is split so sticky bit is updated on RHS. -// 2. Range is already split and split key is the start key of a range, so update -// the sticky bit of that range, but no range is split. +// 1. Range is split so sticky bit is updated on RHS. +// 2. Range is already split and split key is the start key of a range, so update +// the sticky bit of that range, but no range is split. func TestRangeSplitsStickyBit(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvclient/kvcoord/transport_race.go b/pkg/kv/kvclient/kvcoord/transport_race.go index 3b43c5d68469..9615535b2faa 100644 --- a/pkg/kv/kvclient/kvcoord/transport_race.go +++ b/pkg/kv/kvclient/kvcoord/transport_race.go @@ -16,7 +16,7 @@ package kvcoord import ( "context" "encoding/json" - "io/ioutil" + "io" "math/rand" "reflect" "sync/atomic" @@ -114,7 +114,7 @@ func GRPCTransportFactory( // are evicted in FIFO order. const size = 1000 bas := make([]*roachpb.BatchRequest, size) - encoder := json.NewEncoder(ioutil.Discard) + encoder := json.NewEncoder(io.Discard) for { iters++ start := timeutil.Now() diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go index 7f3156a33287..efc7e0083b02 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go @@ -41,6 +41,7 @@ var DisableCommitSanityCheck = envutil.EnvOrDefaultBool("COCKROACH_DISABLE_COMMI // txnState represents states relating to whether an EndTxn request needs // to be sent. +// //go:generate stringer -type=txnState type txnState int diff --git a/pkg/kv/kvclient/kvcoord/txn_correctness_test.go b/pkg/kv/kvclient/kvcoord/txn_correctness_test.go index 2df43ee0bdb3..2b807d9639b8 100644 --- a/pkg/kv/kvclient/kvcoord/txn_correctness_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_correctness_test.go @@ -877,7 +877,8 @@ func checkConcurrency(name string, txns []string, verify *verifier, t *testing.T // reader must not see intermediate results from the reader/writer. // // Read skew would typically fail with a history such as: -// R1(A) R2(B) I2(B) R2(A) I2(A) R1(B) C1 C2 +// +// R1(A) R2(B) I2(B) R2(A) I2(A) R1(B) C1 C2 func TestTxnDBReadSkewAnomaly(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -909,11 +910,13 @@ func TestTxnDBReadSkewAnomaly(t *testing.T) { // depending on priority. 
// // Lost update would typically fail with a history such as: -// R1(A) R2(A) I1(A) I2(A) C1 C2 +// +// R1(A) R2(A) I1(A) I2(A) C1 C2 // // However, the following variant will cause a lost update in // READ_COMMITTED and in practice requires REPEATABLE_READ to avoid. -// R1(A) R2(A) I1(A) C1 I2(A) C2 +// +// R1(A) R2(A) I1(A) C1 I2(A) C2 func TestTxnDBLostUpdateAnomaly(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -942,7 +945,8 @@ func TestTxnDBLostUpdateAnomaly(t *testing.T) { // even on keys which have no values written. // // Lost delete would typically fail with a history such as: -// D2(A) R1(A) D2(B) C2 W1(B,A) C1 +// +// D2(A) R1(A) D2(B) C2 W1(B,A) C1 func TestTxnDBLostDeleteAnomaly(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -978,7 +982,8 @@ func TestTxnDBLostDeleteAnomaly(t *testing.T) { // test is retained for good measure. // // Lost delete range would typically fail with a history such as: -// D2(A) DR2(B-C) R1(A) C2 W1(B,A) C1 +// +// D2(A) DR2(B-C) R1(A) C2 W1(B,A) C1 func TestTxnDBLostDeleteRangeAnomaly(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1010,7 +1015,8 @@ func TestTxnDBLostDeleteRangeAnomaly(t *testing.T) { // ranges when settling concurrency issues. // // Phantom reads would typically fail with a history such as: -// R2(B) SC1(A-C) I2(B) C2 SC1(A-C) C1 +// +// R2(B) SC1(A-C) I2(B) C2 SC1(A-C) C1 func TestTxnDBPhantomReadAnomaly(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1034,7 +1040,8 @@ func TestTxnDBPhantomReadAnomaly(t *testing.T) { // functionality causes read/write conflicts. // // Phantom deletes would typically fail with a history such as: -// R2(B) DR1(A-C) I2(B) C2 SC1(A-C) W1(D,A+B) C1 +// +// R2(B) DR1(A-C) I2(B) C2 SC1(A-C) W1(D,A+B) C1 func TestTxnDBPhantomDeleteAnomaly(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1063,7 +1070,8 @@ func TestTxnDBPhantomDeleteAnomaly(t *testing.T) { // "skew". // // Write skew would typically fail with a history such as: -// SC1(A-C) SC2(A-C) W1(A,A+B+1) C1 W2(B,A+B+1) C2 +// +// SC1(A-C) SC2(A-C) W1(A,A+B+1) C1 W2(B,A+B+1) C2 // // In the test below, each txn reads A and B and increments one by 1. // The read values and increment are then summed and written either to diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go index 895fe3820c6f..2a5b7573476e 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go @@ -50,12 +50,12 @@ var parallelCommitsEnabled = settings.RegisterBoolSetting( // // Parallel commits works by defining a committed transaction as a transaction // that meets one of the two following commit conditions: -// 1. a transaction is *explicitly committed* if it has a transaction record with -// a COMMITTED status -// 2. a transaction is *implicitly committed* if it has a transaction record with -// a STAGING status and intents written for all writes declared as "in-flight" -// on the transaction record at equal or lower timestamps than the transaction -// record's commit timestamp +// 1. a transaction is *explicitly committed* if it has a transaction record with +// a COMMITTED status +// 2. 
a transaction is *implicitly committed* if it has a transaction record with +// a STAGING status and intents written for all writes declared as "in-flight" +// on the transaction record at equal or lower timestamps than the transaction +// record's commit timestamp // // A transaction may move from satisfying the implicit commit condition to // satisfying the explicit commit condition. This is desirable because it moves @@ -88,20 +88,20 @@ var parallelCommitsEnabled = settings.RegisterBoolSetting( // satisfied and the transaction is still in-progress (and could still be // committed or aborted at a later time). There are a number of reasons why // some of the requests in the final batch may have failed: -// - intent writes: these requests may fail to write an intent due to a logical +// - intent writes: these requests may fail to write an intent due to a logical // error like a ConditionFailedError. They also could have succeeded at writing // an intent but failed to write it at the desired timestamp because they ran // into the timestamp cache or another committed value. In the first case, the // txnCommitter will receive an error. In the second, it will generate one in // needTxnRetryAfterStaging. -// - query intents: these requests may fail because they discover that one of the +// - query intents: these requests may fail because they discover that one of the // previously issued writes has failed; either because it never left an intent // or because it left one at too high of a timestamp. In this case, the request // will return an error because the requests all have the ErrorIfMissing option // set. It will also prevent the write from ever succeeding in the future, which // ensures that the transaction will never suddenly become implicitly committed // at a later point due to the write eventually succeeding (e.g. after a replay). -// - end txn: this request may fail with a TransactionRetryError for any number of +// - end txn: this request may fail with a TransactionRetryError for any number of // reasons, such as if the transaction's provisional commit timestamp has been // pushed past its read timestamp. In all of these cases, an error will be // returned and the transaction record will not be staged. diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go index 1342d572fc22..87c039cbf837 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go @@ -105,30 +105,30 @@ var rejectTxnOverTrackedWritesBudget = settings.RegisterBoolSetting( // Chaining on to in-flight async writes is important for two main reasons to // txnPipeliner: // -// 1. requests proposed to Raft will not necessarily succeed. For any number of -// reasons, the request may make it through Raft and be discarded or fail to -// ever even be replicated. A transaction must check that all async writes -// succeeded before committing. However, when these proposals do fail, their -// errors aren't particularly interesting to a transaction. This is because -// these errors are not deterministic Transaction-domain errors that a -// transaction must adhere to for correctness such as conditional-put errors or -// other symptoms of constraint violations. These kinds of errors are all -// discovered during write *evaluation*, which an async write will perform -// synchronously before consensus. 
Any error during consensus is outside of the -// Transaction-domain and can always trigger a transaction retry. +// 1. requests proposed to Raft will not necessarily succeed. For any number of +// reasons, the request may make it through Raft and be discarded or fail to +// ever even be replicated. A transaction must check that all async writes +// succeeded before committing. However, when these proposals do fail, their +// errors aren't particularly interesting to a transaction. This is because +// these errors are not deterministic Transaction-domain errors that a +// transaction must adhere to for correctness such as conditional-put errors or +// other symptoms of constraint violations. These kinds of errors are all +// discovered during write *evaluation*, which an async write will perform +// synchronously before consensus. Any error during consensus is outside of the +// Transaction-domain and can always trigger a transaction retry. // -// 2. transport layers beneath the txnPipeliner do not provide strong enough -// ordering guarantees between concurrent requests in the same transaction to -// avoid needing explicit chaining. For instance, DistSender uses unary gRPC -// requests instead of gRPC streams, so it can't natively expose strong ordering -// guarantees. Perhaps more importantly, even when a command has acquired latches -// and evaluated on a Replica, it is not guaranteed to be applied before -// interfering commands. This is because the command may be retried outside of -// the serialization of the spanlatch manager for any number of reasons, such as -// leaseholder changes. When the command re-acquired its latches, it's possible -// that interfering commands may jump ahead of it. To combat this, the -// txnPipeliner uses chaining to throw an error when these re-orderings would -// have affected the order that transactional requests evaluate in. +// 2. transport layers beneath the txnPipeliner do not provide strong enough +// ordering guarantees between concurrent requests in the same transaction to +// avoid needing explicit chaining. For instance, DistSender uses unary gRPC +// requests instead of gRPC streams, so it can't natively expose strong ordering +// guarantees. Perhaps more importantly, even when a command has acquired latches +// and evaluated on a Replica, it is not guaranteed to be applied before +// interfering commands. This is because the command may be retried outside of +// the serialization of the spanlatch manager for any number of reasons, such as +// leaseholder changes. When the command re-acquired its latches, it's possible +// that interfering commands may jump ahead of it. To combat this, the +// txnPipeliner uses chaining to throw an error when these re-orderings would +// have affected the order that transactional requests evaluate in. // // The interceptor proves all in-flight writes before explicitly committing a // transaction by tacking on a QueryIntent request for each one to the front of @@ -161,28 +161,28 @@ var rejectTxnOverTrackedWritesBudget = settings.RegisterBoolSetting( // possible, even if no other overlapping requests force them to be proven. The // approaches are: // -// 1. launch a background process after each successful async write to query its -// intents and wait for it to succeed. 
This would effectively solve the issue, -// but at the cost of many more goroutines and many more QueryIntent requests, -// most of which would be redundant because their corresponding write wouldn't -// complete until after an EndTxn synchronously needed to prove them anyway. +// 1. launch a background process after each successful async write to query its +// intents and wait for it to succeed. This would effectively solve the issue, +// but at the cost of many more goroutines and many more QueryIntent requests, +// most of which would be redundant because their corresponding write wouldn't +// complete until after an EndTxn synchronously needed to prove them anyway. // -// 2. to address the issue of an unbounded number of background goroutines -// proving writes in approach 1, a single background goroutine could be run -// that repeatedly loops over all in-flight writes and attempts to prove -// them. This approach was used in an early revision of #26599 and has the nice -// property that only one batch of QueryIntent requests is ever active at a -// given time. It may be revisited, but for now it is not used for the same -// reason as approach 1: most of its QueryIntent requests will be useless -// because a transaction will send an EndTxn immediately after sending all -// of its writes. +// 2. to address the issue of an unbounded number of background goroutines +// proving writes in approach 1, a single background goroutine could be run +// that repeatedly loops over all in-flight writes and attempts to prove +// them. This approach was used in an early revision of #26599 and has the nice +// property that only one batch of QueryIntent requests is ever active at a +// given time. It may be revisited, but for now it is not used for the same +// reason as approach 1: most of its QueryIntent requests will be useless +// because a transaction will send an EndTxn immediately after sending all +// of its writes. // -// 3. turn the KV interface into a streaming protocol (#8360) that could support -// returning multiple results. This would allow clients to return immediately -// after a writes "evaluation" phase completed but hold onto a handle to the -// request and be notified immediately after its "replication" phase completes. -// This would allow txnPipeliner to prove in-flight writes immediately after -// they finish consensus without any extra RPCs. +// 3. turn the KV interface into a streaming protocol (#8360) that could support +// returning multiple results. This would allow clients to return immediately +// after a writes "evaluation" phase completed but hold onto a handle to the +// request and be notified immediately after its "replication" phase completes. +// This would allow txnPipeliner to prove in-flight writes immediately after +// they finish consensus without any extra RPCs. // // So far, none of these approaches have been integrated. // diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go index 5c0227b0b6ee..eb6c0e6c1d43 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go @@ -24,39 +24,38 @@ import ( // // Sequence numbers serve a few roles in the transaction model: // -// 1. they are used to enforce an ordering between read and write operations in a -// single transaction that go to the same key. 
Each read request that travels -// through the interceptor is assigned the sequence number of the most recent -// write. Each write request that travels through the interceptor is assigned -// a sequence number larger than any previously allocated. +// 1. they are used to enforce an ordering between read and write operations in a +// single transaction that go to the same key. Each read request that travels +// through the interceptor is assigned the sequence number of the most recent +// write. Each write request that travels through the interceptor is assigned +// a sequence number larger than any previously allocated. // -// This is true even for leaf transaction coordinators. In their case, they are -// provided the sequence number of the most recent write during construction. -// Because they only perform read operations and never issue writes, they assign -// each read this sequence number without ever incrementing their own counter. -// In this way, sequence numbers are maintained correctly across a distributed -// tree of transaction coordinators. +// This is true even for leaf transaction coordinators. In their case, they are +// provided the sequence number of the most recent write during construction. +// Because they only perform read operations and never issue writes, they assign +// each read this sequence number without ever incrementing their own counter. +// In this way, sequence numbers are maintained correctly across a distributed +// tree of transaction coordinators. // -// 2. they are used to uniquely identify write operations. Because every write -// request is given a new sequence number, the tuple (txn_id, txn_epoch, seq) -// uniquely identifies a write operation across an entire cluster. This property -// is exploited when determining the status of an individual write by looking -// for its intent. We perform such an operation using the QueryIntent request -// type when pipelining transactional writes. We will do something similar -// during the recovery stage of implicitly committed transactions. +// 2. they are used to uniquely identify write operations. Because every write +// request is given a new sequence number, the tuple (txn_id, txn_epoch, seq) +// uniquely identifies a write operation across an entire cluster. This property +// is exploited when determining the status of an individual write by looking +// for its intent. We perform such an operation using the QueryIntent request +// type when pipelining transactional writes. We will do something similar +// during the recovery stage of implicitly committed transactions. // -// 3. they are used to determine whether a batch contains the entire write set -// for a transaction. See BatchRequest.IsCompleteTransaction. -// -// 4. they are used to provide idempotency for replays and re-issues. The MVCC -// layer is sequence number-aware and ensures that reads at a given sequence -// number ignore writes in the same transaction at larger sequence numbers. -// Likewise, writes at a sequence number become no-ops if an intent with the -// same sequence is already present. If an intent with the same sequence is not -// already present but an intent with a larger sequence number is, an error is -// returned. Likewise, if an intent with the same sequence is present but its -// value is different than what we recompute, an error is returned. +// 3. they are used to determine whether a batch contains the entire write set +// for a transaction. See BatchRequest.IsCompleteTransaction. // +// 4. 
they are used to provide idempotency for replays and re-issues. The MVCC +// layer is sequence number-aware and ensures that reads at a given sequence +// number ignore writes in the same transaction at larger sequence numbers. +// Likewise, writes at a sequence number become no-ops if an intent with the +// same sequence is already present. If an intent with the same sequence is not +// already present but an intent with a larger sequence number is, an error is +// returned. Likewise, if an intent with the same sequence is present but its +// value is different than what we recompute, an error is returned. type txnSeqNumAllocator struct { wrapped lockedSender diff --git a/pkg/kv/kvclient/kvcoord/txn_test.go b/pkg/kv/kvclient/kvcoord/txn_test.go index fb970a17f1d4..c250b994b856 100644 --- a/pkg/kv/kvclient/kvcoord/txn_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_test.go @@ -131,7 +131,7 @@ func BenchmarkSingleRoundtripWithLatency(b *testing.B) { // The transaction history looks as follows ("2" refers to the // independent goroutine's actions) // -// R1(A) W2(A,"hi") W1(A,"oops!") C1 [serializable restart] R1(A) W1(A,"correct") C1 +// R1(A) W2(A,"hi") W1(A,"oops!") C1 [serializable restart] R1(A) W1(A,"correct") C1 func TestLostUpdate(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvclient/kvstreamer/streamer.go b/pkg/kv/kvclient/kvstreamer/streamer.go index ff77efe3b489..d27bc1e74201 100644 --- a/pkg/kv/kvclient/kvstreamer/streamer.go +++ b/pkg/kv/kvclient/kvstreamer/streamer.go @@ -160,34 +160,34 @@ func (r Result) Release(ctx context.Context) { // // The example usage is roughly as follows: // -// s := NewStreamer(...) -// s.Init(OperationMode, Hints) -// ... -// for needMoreKVs { -// // Check whether there are results to the previously enqueued requests. -// // This will block if no results are available, but there are some -// // enqueued requests. -// results, err := s.GetResults(ctx) -// // err check -// ... -// if len(results) > 0 { -// processResults(results) -// // return to the client -// ... -// // when results are no longer needed, Release() them -// } -// // All previously enqueued requests have already been responded to. -// if moreRequestsToEnqueue { -// err := s.Enqueue(ctx, requests) -// // err check -// ... -// } else { -// // done -// ... -// } -// } -// ... -// s.Close() +// s := NewStreamer(...) +// s.Init(OperationMode, Hints) +// ... +// for needMoreKVs { +// // Check whether there are results to the previously enqueued requests. +// // This will block if no results are available, but there are some +// // enqueued requests. +// results, err := s.GetResults(ctx) +// // err check +// ... +// if len(results) > 0 { +// processResults(results) +// // return to the client +// ... +// // when results are no longer needed, Release() them +// } +// // All previously enqueued requests have already been responded to. +// if moreRequestsToEnqueue { +// err := s.Enqueue(ctx, requests) +// // err check +// ... +// } else { +// // done +// ... +// } +// } +// ... +// s.Close() // // The Streamer builds on top of the BatchRequest API provided by the DistSender // and aims to allow for executing the requests in parallel (to improve the @@ -432,7 +432,8 @@ func (s *Streamer) Init( // will be sorted in the order of the lookup index if the index contains only // ascending columns. // TODO(drewk): lift the restriction that index columns must be ASC in order to -// return results in lookup order. +// +// return results in lookup order. 
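The sequence-number roles listed above reduce to a very small allocation rule: writes get a strictly increasing number, reads get the number of the most recent write. A hedged sketch with illustrative names (not the real txnSeqNumAllocator):

package seqsketch

// seqAlloc is a simplified stand-in for a sequence-number allocator: writes
// are stamped with a strictly increasing sequence number, while reads are
// stamped with the sequence of the most recent write so they observe all
// prior writes in the same transaction but none issued later.
type seqAlloc struct {
	writeSeq int32 // sequence of the most recent write (0 if none)
}

// stampWrite allocates and returns a new sequence number for a write.
func (a *seqAlloc) stampWrite() int32 {
	a.writeSeq++
	return a.writeSeq
}

// stampRead returns the sequence number to assign to a read: the sequence of
// the most recent write. A leaf coordinator that only reads would be
// constructed with writeSeq already set and would never increment it.
func (a *seqAlloc) stampRead() int32 {
	return a.writeSeq
}

Under this rule the tuple (txn_id, txn_epoch, seq) stays unique per write, which is what the QueryIntent and recovery paths described above rely on.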
// // It is the caller's responsibility to ensure that the memory footprint of reqs // (i.e. roachpb.Spans inside of the requests) is reasonable. Enqueue will diff --git a/pkg/kv/kvclient/rangecache/range_cache.go b/pkg/kv/kvclient/rangecache/range_cache.go index 0b88ada953f7..2ec90046c879 100644 --- a/pkg/kv/kvclient/rangecache/range_cache.go +++ b/pkg/kv/kvclient/rangecache/range_cache.go @@ -130,27 +130,27 @@ type RangeCache struct { // possible events that may have happened causing our cache to be stale. For // each of these, we try to coalesce all requests that will end up on the same // range post-event together. -// - Split: for a split, only the right half of the split will attempt to evict -// the stale descriptor because only the right half will be sending to -// the wrong range. Once this stale descriptor is evicted, keys from -// both halves of the split will miss the cache. Because both sides of -// the split will now map to the same EvictionToken, it is important to -// use EvictAndReplace if possible to insert one of the two new descriptors. -// This way, no requests to that descriptor will ever miss the cache and -// risk being coalesced into the other request. If this is not possible, -// the lookup will still work, but it will require multiple lookups, which -// will be launched in series when requests find that their desired key -// is outside of the returned descriptor. -// - Merges: for a merge, the left half of the merge will never notice. The right -// half of the merge will suddenly find its descriptor to be stale, so -// it will evict and lookup the new descriptor. We set the key to hash -// to the start of the stale descriptor for lookup requests to the right -// half of the merge so that all requests will be coalesced to the same -// lookupRequest. -// - Rebal: for a rebalance, the entire descriptor will suddenly go stale and -// requests to it will evict the descriptor. We set the key to hash to -// the start of the stale descriptor for lookup requests to the rebalanced -// descriptor so that all requests will be coalesced to the same lookupRequest. +// - Split: for a split, only the right half of the split will attempt to evict +// the stale descriptor because only the right half will be sending to +// the wrong range. Once this stale descriptor is evicted, keys from +// both halves of the split will miss the cache. Because both sides of +// the split will now map to the same EvictionToken, it is important to +// use EvictAndReplace if possible to insert one of the two new descriptors. +// This way, no requests to that descriptor will ever miss the cache and +// risk being coalesced into the other request. If this is not possible, +// the lookup will still work, but it will require multiple lookups, which +// will be launched in series when requests find that their desired key +// is outside of the returned descriptor. +// - Merges: for a merge, the left half of the merge will never notice. The right +// half of the merge will suddenly find its descriptor to be stale, so +// it will evict and lookup the new descriptor. We set the key to hash +// to the start of the stale descriptor for lookup requests to the right +// half of the merge so that all requests will be coalesced to the same +// lookupRequest. +// - Rebal: for a rebalance, the entire descriptor will suddenly go stale and +// requests to it will evict the descriptor. 
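The coalescing scheme described above boils down to a single-flight pattern keyed on the stale descriptor's start key (or end key for reverse scans): all cache misses that hash to the same key share one meta2 lookup. A hedged sketch with simplified types, not the real RangeCache or lookupRequest code:

package coalescesketch

import "sync"

// descriptor is a simplified stand-in for a range descriptor.
type descriptor struct {
	StartKey, EndKey string
}

// lookupCoalescer collapses concurrent lookups that share the same
// coalescing key into a single outgoing lookup whose result is shared.
type lookupCoalescer struct {
	mu       sync.Mutex
	inFlight map[string]*call
}

type call struct {
	done chan struct{}
	desc descriptor
	err  error
}

func newLookupCoalescer() *lookupCoalescer {
	return &lookupCoalescer{inFlight: make(map[string]*call)}
}

// lookup runs fetch at most once per coalescing key at a time; callers that
// arrive while a fetch for the same key is outstanding block and share its
// result instead of issuing their own lookup.
func (lc *lookupCoalescer) lookup(
	coalesceKey string, fetch func() (descriptor, error),
) (descriptor, error) {
	lc.mu.Lock()
	if c, ok := lc.inFlight[coalesceKey]; ok {
		lc.mu.Unlock()
		<-c.done
		return c.desc, c.err
	}
	c := &call{done: make(chan struct{})}
	lc.inFlight[coalesceKey] = c
	lc.mu.Unlock()

	c.desc, c.err = fetch()
	close(c.done)

	lc.mu.Lock()
	delete(lc.inFlight, coalesceKey)
	lc.mu.Unlock()
	return c.desc, c.err
}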
We set the key to hash to +// the start of the stale descriptor for lookup requests to the rebalanced +// descriptor so that all requests will be coalesced to the same lookupRequest. // // Note that the above description assumes that useReverseScan is false for simplicity. // If useReverseScan is true, we need to use the end key of the stale descriptor instead. @@ -310,6 +310,7 @@ func (et *EvictionToken) clear() { // // Note that the returned descriptor might have Generation = 0. This means that // the descriptor is speculative; it is not know to have committed. +// //gcassert:noescape func (et EvictionToken) Desc() *roachpb.RangeDescriptor { if !et.Valid() { diff --git a/pkg/kv/kvnemesis/doc.go b/pkg/kv/kvnemesis/doc.go index a1c6ce3234fb..6ed0e4fdbc42 100644 --- a/pkg/kv/kvnemesis/doc.go +++ b/pkg/kv/kvnemesis/doc.go @@ -23,19 +23,19 @@ // guarantees. // // TODO -// - CPut/InitPut/Increment -// - ClearRange/RevertRange -// - AdminRelocateRange -// - AdminUnsplit -// - AdminScatter -// - CheckConsistency -// - ExportRequest -// - AddSSTable -// - Root and leaf transactions -// - GCRequest -// - Protected timestamps -// - Transactions being abandoned by their coordinator -// - Continuing txns after CPut and WriteIntent errors (generally continuing -// after errors is not allowed, but it is allowed after ConditionFailedError and -// WriteIntentError as a special case) +// - CPut/InitPut/Increment +// - ClearRange/RevertRange +// - AdminRelocateRange +// - AdminUnsplit +// - AdminScatter +// - CheckConsistency +// - ExportRequest +// - AddSSTable +// - Root and leaf transactions +// - GCRequest +// - Protected timestamps +// - Transactions being abandoned by their coordinator +// - Continuing txns after CPut and WriteIntent errors (generally continuing +// after errors is not allowed, but it is allowed after ConditionFailedError and +// WriteIntentError as a special case) package kvnemesis diff --git a/pkg/kv/kvprober/planner.go b/pkg/kv/kvprober/planner.go index 36042f70dd22..4f9e3e9d1517 100644 --- a/pkg/kv/kvprober/planner.go +++ b/pkg/kv/kvprober/planner.go @@ -110,10 +110,10 @@ func newMeta2Planner( // two design goals for our approach to probabilistically selecting a place in // the keyspace to probe: // -// 1. That the approach is efficient enough. Resource requirements shouldn't -// scale with the number of ranges in the cluster, for example. -// 2. That the approach is available enough in times of outage that the -// prober is able to generate useful signal when we need it most. +// 1. That the approach is efficient enough. Resource requirements shouldn't +// scale with the number of ranges in the cluster, for example. +// 2. That the approach is available enough in times of outage that the +// prober is able to generate useful signal when we need it most. // // How do we do it? The first option we considered was to probe // crdb_internal.ranges_no_leases. We reject that approach in favor of making a @@ -135,19 +135,19 @@ func newMeta2Planner( // should not scale up as the number of ranges in the cluster grows. // // Memory: -// - The meta2Planner struct's mem usage scales with -// size(the Plan struct) * the kv.prober.planner.n_probes_at_a_time cluster -// setting. -// - The Plan function's mem usage scales with -// size(KV pairs holding range descriptors) * the -// kv.prober.planner.n_probes_at_a_time cluster setting. +// - The meta2Planner struct's mem usage scales with +// size(the Plan struct) * the kv.prober.planner.n_probes_at_a_time cluster +// setting. 
+// - The Plan function's mem usage scales with +// size(KV pairs holding range descriptors) * the +// kv.prober.planner.n_probes_at_a_time cluster setting. // // CPU: -// - Again scales with the kv.prober.planner.n_probes_at_a_time cluster -// setting. Note the proto unmarshalling. We also shuffle a slice of size -// kv.prober.planner.n_probes_at_a_time. If the setting is set to a high -// number, we pay a higher CPU cost less often; if it's set to a low number, -// we pay a smaller CPU cost more often. +// - Again scales with the kv.prober.planner.n_probes_at_a_time cluster +// setting. Note the proto unmarshalling. We also shuffle a slice of size +// kv.prober.planner.n_probes_at_a_time. If the setting is set to a high +// number, we pay a higher CPU cost less often; if it's set to a low number, +// we pay a smaller CPU cost more often. func (p *meta2Planner) next(ctx context.Context) (Step, error) { if len(p.plan) == 0 { // Protect CRDB from planning executing too often, due to either issues diff --git a/pkg/kv/kvserver/addressing.go b/pkg/kv/kvserver/addressing.go index 7f28df11a35e..8587eaa55a49 100644 --- a/pkg/kv/kvserver/addressing.go +++ b/pkg/kv/kvserver/addressing.go @@ -81,7 +81,7 @@ func updateRangeAddressing(b *kv.Batch, desc *roachpb.RangeDescriptor) error { // 3. If desc.EndKey is normal user key: // - meta2(desc.EndKey) // 3a. If desc.StartKey is not normal user key: -// - meta1(KeyMax) +// - meta1(KeyMax) func rangeAddressing(b *kv.Batch, desc *roachpb.RangeDescriptor, action metaAction) error { // 1. handle illegal case of start or end key being meta1. if bytes.HasPrefix(desc.EndKey, keys.Meta1Prefix) || diff --git a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go index f921ed1e078d..acc1e7f6f701 100644 --- a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go +++ b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go @@ -1432,11 +1432,11 @@ func (a Allocator) RebalanceTarget( // // The return values are, in order: // -// 1. The target on which to add a new replica, -// 2. An existing replica to remove, -// 3. a JSON string for use in the range log, and -// 4. a boolean indicationg whether 1-3 were populated (i.e. whether a rebalance -// opportunity was found). +// 1. The target on which to add a new replica, +// 2. An existing replica to remove, +// 3. a JSON string for use in the range log, and +// 4. a boolean indicationg whether 1-3 were populated (i.e. whether a rebalance +// opportunity was found). func (a Allocator) RebalanceVoter( ctx context.Context, conf roachpb.SpanConfig, @@ -2152,22 +2152,22 @@ func (a Allocator) shouldTransferLeaseForAccessLocality( // #13232 or the leaseholder_locality.md RFC for more details), but the general // logic behind each part of the formula is as follows: // -// * LeaseRebalancingAggressiveness: Allow the aggressiveness to be tuned via -// a cluster setting. -// * 0.1: Constant factor to reduce aggressiveness by default -// * math.Log10(remoteWeight/sourceWeight): Comparison of the remote replica's -// weight to the local replica's weight. Taking the log of the ratio instead -// of using the ratio directly makes things symmetric -- i.e. r1 comparing -// itself to r2 will come to the same conclusion as r2 comparing itself to r1. -// * math.Log1p(remoteLatencyMillis): This will be 0 if there's no latency, -// removing the weight/latency factor from consideration. Otherwise, it grows -// the aggressiveness for stores that are farther apart. 
Note that Log1p grows -// faster than Log10 as its argument gets larger, which is intentional to -// increase the importance of latency. -// * overfullScore and underfullScore: rebalanceThreshold helps us get an idea -// of the ideal number of leases on each store. We then calculate these to -// compare how close each node is to its ideal state and use the differences -// from the ideal state on each node to compute a final score. +// - LeaseRebalancingAggressiveness: Allow the aggressiveness to be tuned via +// a cluster setting. +// - 0.1: Constant factor to reduce aggressiveness by default +// - math.Log10(remoteWeight/sourceWeight): Comparison of the remote replica's +// weight to the local replica's weight. Taking the log of the ratio instead +// of using the ratio directly makes things symmetric -- i.e. r1 comparing +// itself to r2 will come to the same conclusion as r2 comparing itself to r1. +// - math.Log1p(remoteLatencyMillis): This will be 0 if there's no latency, +// removing the weight/latency factor from consideration. Otherwise, it grows +// the aggressiveness for stores that are farther apart. Note that Log1p grows +// faster than Log10 as its argument gets larger, which is intentional to +// increase the importance of latency. +// - overfullScore and underfullScore: rebalanceThreshold helps us get an idea +// of the ideal number of leases on each store. We then calculate these to +// compare how close each node is to its ideal state and use the differences +// from the ideal state on each node to compute a final score. // // Returns a total score for the replica that takes into account the number of // leases already on each store. Also returns the raw "adjustment" value that's diff --git a/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go b/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go index 3fb2113e9430..a8fe34cf6c93 100644 --- a/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go +++ b/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go @@ -4168,11 +4168,16 @@ func TestAllocatorRebalanceNonVoters(t *testing.T) { // TestAllocatorRebalanceReadAmpCheck ensures that rebalancing voters: // (1) Respects storeHealthEnforcement setting, by ignoring L0 Sublevels in -// rebalancing decisions when disabled or set to log only. +// +// rebalancing decisions when disabled or set to log only. +// // (2) Considers L0 sublevels when set to rebalanceOnly or allocate in -// conjunction with the mean. +// +// conjunction with the mean. +// // (3) Does not attempt to rebalance off of the store when read amplification -// is high, as this setting is only used for filtering candidates. +// +// is high, as this setting is only used for filtering candidates. 
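Read multiplicatively, the lease-rebalancing factors listed above combine roughly as follows. This is a hedged sketch of that product only (the overfull/underfull score comparison is omitted, and the exact composition lives in shouldTransferLeaseForAccessLocality):

package leasesketch

import "math"

// rebalanceAdjustment combines the factors described above: a tunable
// aggressiveness (the LeaseRebalancingAggressiveness cluster setting), a 0.1
// damping constant, the log-ratio of remote to source replica weight (which
// is symmetric between the two nodes), and log1p of the inter-node latency
// in milliseconds (0 when there is no latency, growing with distance).
func rebalanceAdjustment(
	aggressiveness float64,
	remoteWeight, sourceWeight float64,
	remoteLatencyMillis float64,
) float64 {
	return aggressiveness * 0.1 *
		math.Log10(remoteWeight/sourceWeight) *
		math.Log1p(remoteLatencyMillis)
}

// With equal weights the Log10 term is 0 and there is no adjustment; with a
// remote weight 10x the source weight and 100ms of latency, the sketch
// yields roughly 1 * 0.1 * 1 * ln(101) ≈ 0.46.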
func TestAllocatorRebalanceReadAmpCheck(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() diff --git a/pkg/kv/kvserver/allocator/storepool/store_pool.go b/pkg/kv/kvserver/allocator/storepool/store_pool.go index 58be0beb4f55..d6d7266cb41b 100644 --- a/pkg/kv/kvserver/allocator/storepool/store_pool.go +++ b/pkg/kv/kvserver/allocator/storepool/store_pool.go @@ -139,17 +139,17 @@ func MakeStorePoolNodeLivenessFunc(nodeLiveness *liveness.NodeLiveness) NodeLive // The timeline of the states that a liveness goes through as time passes after // the respective liveness record is written is the following: // -// -----|-------LIVE---|------UNAVAILABLE---|------DEAD------------> time -// tWrite tExp tExp+threshold +// -----|-------LIVE---|------UNAVAILABLE---|------DEAD------------> time +// tWrite tExp tExp+threshold // // Explanation: // -// - Let's say a node write its liveness record at tWrite. It sets the -// Expiration field of the record as tExp=tWrite+livenessThreshold. -// The node is considered LIVE (or DECOMMISSIONING or DRAINING). -// - At tExp, the IsLive() method starts returning false. The state becomes -// UNAVAILABLE (or stays DECOMMISSIONING or DRAINING). -// - Once threshold passes, the node is considered DEAD (or DECOMMISSIONED). +// - Let's say a node write its liveness record at tWrite. It sets the +// Expiration field of the record as tExp=tWrite+livenessThreshold. +// The node is considered LIVE (or DECOMMISSIONING or DRAINING). +// - At tExp, the IsLive() method starts returning false. The state becomes +// UNAVAILABLE (or stays DECOMMISSIONING or DRAINING). +// - Once threshold passes, the node is considered DEAD (or DECOMMISSIONED). // // NB: There's a bit of discrepancy between what "Decommissioned" represents, as // seen by NodeStatusLiveness, and what "Decommissioned" represents as diff --git a/pkg/kv/kvserver/apply/doc.go b/pkg/kv/kvserver/apply/doc.go index c23e83ae7efc..910f84e5a78c 100644 --- a/pkg/kv/kvserver/apply/doc.go +++ b/pkg/kv/kvserver/apply/doc.go @@ -12,7 +12,7 @@ Package apply provides abstractions and routines associated with the application of committed raft entries to a replicated state machine. -State Machine Replication +# State Machine Replication Raft entry application is the process of taking entries that have been committed to a raft group's "raft log" through raft consensus and using them to drive the @@ -42,7 +42,7 @@ reject the command and handle the rejection in the same way (e.g. decide not to make any state transition). The latter, on the other hand, it not permissible, and is typically handled by crashing the node. -Performance Concerns +# Performance Concerns The state machine replication approach also poses complications that affect performance. @@ -106,7 +106,7 @@ batches (will batching improve system throughput?), and a number of other factors. This package has not begun to answer these questions, but it serves to provide the abstractions necessary to perform such prioritization in the future. -Usage +# Usage The package exports a set of interfaces that users must provide implementations for. Notably, users of the package must provide a StateMachine that encapsulates diff --git a/pkg/kv/kvserver/apply/task.go b/pkg/kv/kvserver/apply/task.go index 1f5a244b5a62..03dc1a7dcf02 100644 --- a/pkg/kv/kvserver/apply/task.go +++ b/pkg/kv/kvserver/apply/task.go @@ -189,7 +189,6 @@ func (t *Task) assertDecoded() { // the method takes a maxIndex parameter that limits the indexes that it will // acknowledge. 
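The liveness timeline above classifies a node purely by where "now" falls relative to the record's expiration and the dead threshold. A tiny sketch using time.Time stand-ins rather than the real liveness record:

package livenesssketch

import "time"

// livenessState classifies a node per the timeline described above: LIVE
// before the record's expiration, UNAVAILABLE between the expiration and
// expiration+threshold, DEAD afterwards.
func livenessState(now, expiration time.Time, threshold time.Duration) string {
	switch {
	case now.Before(expiration):
		return "LIVE" // or DECOMMISSIONING / DRAINING
	case now.Before(expiration.Add(threshold)):
		return "UNAVAILABLE" // or still DECOMMISSIONING / DRAINING
	default:
		return "DEAD" // or DECOMMISSIONED
	}
}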
Typically, callers will supply the highest index that they have // durably written to their raft log for this upper bound. -// func (t *Task) AckCommittedEntriesBeforeApplication(ctx context.Context, maxIndex uint64) error { t.assertDecoded() if !t.anyLocal { diff --git a/pkg/kv/kvserver/asim/state/change_test.go b/pkg/kv/kvserver/asim/state/change_test.go index 09c2b8f3d4ea..379c435ff4f1 100644 --- a/pkg/kv/kvserver/asim/state/change_test.go +++ b/pkg/kv/kvserver/asim/state/change_test.go @@ -90,7 +90,9 @@ func testGetReplLocations(state State, r Range) ([]int, []int) { // (1) changes either all succeed or all fail. // (2) removes fail if it is the leaseholder. // (3) add and removes fail if the removed store is the leaseholder and it -// cannot be transferred to the added store. +// +// cannot be transferred to the added store. +// // (4) In (3) the lease transfers when to the newly added store when possible. func TestReplicaChange(t *testing.T) { testCases := []struct { diff --git a/pkg/kv/kvserver/asim/state/exchange.go b/pkg/kv/kvserver/asim/state/exchange.go index 88345454a4cd..4157f62d4bbf 100644 --- a/pkg/kv/kvserver/asim/state/exchange.go +++ b/pkg/kv/kvserver/asim/state/exchange.go @@ -21,7 +21,9 @@ import ( // Exchange controls the dissemination of a store's state, to every other // store in a simulation. The contract requires that: // (1) Single value per tick: Multiple puts at tick t, with desc d should -// provide an identical d to all other puts for d. +// +// provide an identical d to all other puts for d. +// // (2) Calls to Put are monotonic w.r.t tick value passed in. type Exchange interface { // Put inserts store state(s) at tick t. This state will be visible to diff --git a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go index f71c4d30fb02..3cd8ff8cf2fd 100644 --- a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go +++ b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go @@ -810,40 +810,52 @@ func RunCommitTrigger( // replicas process the SplitTrigger before processing any Raft message for RHS // (right hand side) of the newly split range. Something like: // -// Node A Node B Node C -// ---------------------------------------------------- +// Node A Node B Node C +// ---------------------------------------------------- +// // range 1 | | | -// | | | -// SplitTrigger | | -// | SplitTrigger | -// | | SplitTrigger -// | | | -// ---------------------------------------------------- +// +// | | | +// SplitTrigger | | +// | SplitTrigger | +// | | SplitTrigger +// | | | +// ---------------------------------------------------- +// // split finished on A, B and C | | -// | | | +// +// | | | +// // range 2 | | | -// | ---- MsgVote --> | | -// | ---------------------- MsgVote ---> | +// +// | ---- MsgVote --> | | +// | ---------------------- MsgVote ---> | // // But that ideal ordering is not guaranteed. The split is "finished" when two // of the replicas have appended the end-txn request containing the // SplitTrigger to their Raft log. 
The following scenario is possible: // -// Node A Node B Node C -// ---------------------------------------------------- +// Node A Node B Node C +// ---------------------------------------------------- +// // range 1 | | | -// | | | -// SplitTrigger | | -// | SplitTrigger | -// | | | -// ---------------------------------------------------- +// +// | | | +// SplitTrigger | | +// | SplitTrigger | +// | | | +// ---------------------------------------------------- +// // split finished on A and B | | -// | | | +// +// | | | +// // range 2 | | | -// | ---- MsgVote --> | | -// | --------------------- MsgVote ---> ??? -// | | | -// | | SplitTrigger +// +// | ---- MsgVote --> | | +// | --------------------- MsgVote ---> ??? +// | | | +// | | SplitTrigger // // In this scenario, C will create range 2 upon reception of the MsgVote from // A, though locally that span of keys is still part of range 1. This is diff --git a/pkg/kv/kvserver/batcheval/cmd_subsume.go b/pkg/kv/kvserver/batcheval/cmd_subsume.go index 5818c29fa318..db5baae593e0 100644 --- a/pkg/kv/kvserver/batcheval/cmd_subsume.go +++ b/pkg/kv/kvserver/batcheval/cmd_subsume.go @@ -51,15 +51,15 @@ func declareKeysSubsume( // // Specifically, the receiving replica guarantees that: // -// 1. it is the leaseholder at the time the request executes, -// 2. when it responds, there are no commands in flight with a timestamp -// greater than the FreezeStart timestamp provided in the response, -// 3. the MVCC statistics in the response reflect the latest writes, -// 4. it, and all future leaseholders for the range, will not process another -// command until they refresh their range descriptor with a consistent read -// from meta2, and -// 5. if it or any future leaseholder for the range finds that its range -// descriptor has been deleted, it self destructs. +// 1. it is the leaseholder at the time the request executes, +// 2. when it responds, there are no commands in flight with a timestamp +// greater than the FreezeStart timestamp provided in the response, +// 3. the MVCC statistics in the response reflect the latest writes, +// 4. it, and all future leaseholders for the range, will not process another +// command until they refresh their range descriptor with a consistent read +// from meta2, and +// 5. if it or any future leaseholder for the range finds that its range +// descriptor has been deleted, it self destructs. // // To achieve guarantees four and five, when issuing a Subsume request, the // caller must have a merge transaction open that has already placed deletion diff --git a/pkg/kv/kvserver/batcheval/intent.go b/pkg/kv/kvserver/batcheval/intent.go index 816f77e2cbb5..8e943f116029 100644 --- a/pkg/kv/kvserver/batcheval/intent.go +++ b/pkg/kv/kvserver/batcheval/intent.go @@ -132,17 +132,17 @@ func acquireUnreplicatedLocksOnKeys( // copyKey copies the provided roachpb.Key into a new byte slice, returning the // copy. It is used in acquireUnreplicatedLocksOnKeys for two reasons: -// 1. the keys in an MVCCScanResult, regardless of the scan format used, point -// to a small number of large, contiguous byte slices. These "MVCCScan -// batches" contain keys and their associated values in the same backing -// array. To avoid holding these entire backing arrays in memory and -// preventing them from being garbage collected indefinitely, we copy the key -// slices before coupling their lifetimes to those of unreplicated locks. -// 2. the KV API has a contract that byte slices returned from KV will not be -// mutated by higher levels. 
However, we have seen cases (e.g.#64228) where -// this contract is broken due to bugs. To defensively guard against this -// class of memory aliasing bug and prevent keys associated with unreplicated -// locks from being corrupted, we copy them. +// 1. the keys in an MVCCScanResult, regardless of the scan format used, point +// to a small number of large, contiguous byte slices. These "MVCCScan +// batches" contain keys and their associated values in the same backing +// array. To avoid holding these entire backing arrays in memory and +// preventing them from being garbage collected indefinitely, we copy the key +// slices before coupling their lifetimes to those of unreplicated locks. +// 2. the KV API has a contract that byte slices returned from KV will not be +// mutated by higher levels. However, we have seen cases (e.g.#64228) where +// this contract is broken due to bugs. To defensively guard against this +// class of memory aliasing bug and prevent keys associated with unreplicated +// locks from being corrupted, we copy them. func copyKey(k roachpb.Key) roachpb.Key { k2 := make([]byte, len(k)) copy(k2, k) diff --git a/pkg/kv/kvserver/batcheval/result/result.go b/pkg/kv/kvserver/batcheval/result/result.go index 4caa4f95ac74..77f9ae4c79cc 100644 --- a/pkg/kv/kvserver/batcheval/result/result.go +++ b/pkg/kv/kvserver/batcheval/result/result.go @@ -153,9 +153,12 @@ func (lResult *LocalResult) DetachEndTxns(alwaysOnly bool) []EndTxnIntents { // // a) changes to be written to disk when applying the command // b) changes to the state which may require special handling (i.e. code -// execution) on all Replicas +// +// execution) on all Replicas +// // c) data which isn't sent to the followers but the proposer needs for tasks -// it must run when the command has applied (such as resolving intents). +// +// it must run when the command has applied (such as resolving intents). type Result struct { Local LocalResult Replicated kvserverpb.ReplicatedEvalResult diff --git a/pkg/kv/kvserver/batcheval/split_stats_helper.go b/pkg/kv/kvserver/batcheval/split_stats_helper.go index b8ab3301585b..51ffd3b06e7d 100644 --- a/pkg/kv/kvserver/batcheval/split_stats_helper.go +++ b/pkg/kv/kvserver/batcheval/split_stats_helper.go @@ -16,42 +16,42 @@ import "github.com/cockroachdb/cockroach/pkg/storage/enginepb" // split. The quantities known during a split (i.e. while the split trigger // is evaluating) are // -// - AbsPreSplitBothEstimated: the stats of the range before the split trigger, -// i.e. without accounting for any writes in the batch. This can have -// ContainsEstimates set. -// - DeltaBatchEstimated: the writes in the batch, i.e. the stats delta accrued -// from the evaluation of the EndTxn so far (this is mostly the write to the -// transaction record, as well as resolving the intent on the range descriptor, -// but nothing in this code relies on that). Since we have no reason to -// introduce ContainsEstimates in a split trigger, this typically has -// ContainsEstimates unset, but the results will be estimate free either way. -// - AbsPostSplit{Left,Right}: the stats of either the left or right hand side -// range after applying the split, i.e. accounting both for the shrinking as -// well as for the writes in DeltaBatch related to the shrunk keyrange. In -// practice, we obtain this by recomputing the stats using the corresponding -// AbsPostSplit{Left,Right}Fn, and so we don't expect ContainsEstimates to be -// set in them. The choice of which side to scan is controlled by ScanRightFirst. 
-// - DeltaRangeKey: the stats delta that must be added to the non-computed -// half's stats to account for the splitting of range keys straddling the split -// point. See computeSplitRangeKeyStatsDelta() for details. -// -// We are interested in computing from this the quantities -// -// - AbsPostSplitRight(): the stats of the right hand side created by the split, -// i.e. the data taken over from the left hand side plus whatever was written to -// the right hand side in the process (metadata etc). We can recompute this, but -// try to avoid it unless necessary (when CombinedErrorDelta below is nonzero). -// - DeltaPostSplitLeft(): the stats delta that should be emitted by the split -// trigger itself, i.e. the data which the left hand side (initially comprising -// both halves) loses by moving data into the right hand side (including whatever -// DeltaBatch contained in contributions attributable to the keyspace on the -// left). -// - CombinedErrorDelta: the difference between (AbsPreSplitBoth+DeltaBatch) and -// the recomputation of the pre-split range including the batch. This is zero if -// neither of the inputs contains estimates. If it's not zero, we need to -// recompute from scratch to obtain AbsPostSplitRight. What's interesting about -// this quantity is that we never care what exactly it is, but we do care -// whether it's zero or not because if it's zero we get to do less work. +// - AbsPreSplitBothEstimated: the stats of the range before the split trigger, +// i.e. without accounting for any writes in the batch. This can have +// ContainsEstimates set. +// - DeltaBatchEstimated: the writes in the batch, i.e. the stats delta accrued +// from the evaluation of the EndTxn so far (this is mostly the write to the +// transaction record, as well as resolving the intent on the range descriptor, +// but nothing in this code relies on that). Since we have no reason to +// introduce ContainsEstimates in a split trigger, this typically has +// ContainsEstimates unset, but the results will be estimate free either way. +// - AbsPostSplit{Left,Right}: the stats of either the left or right hand side +// range after applying the split, i.e. accounting both for the shrinking as +// well as for the writes in DeltaBatch related to the shrunk keyrange. In +// practice, we obtain this by recomputing the stats using the corresponding +// AbsPostSplit{Left,Right}Fn, and so we don't expect ContainsEstimates to be +// set in them. The choice of which side to scan is controlled by ScanRightFirst. +// - DeltaRangeKey: the stats delta that must be added to the non-computed +// half's stats to account for the splitting of range keys straddling the split +// point. See computeSplitRangeKeyStatsDelta() for details. +// +// # We are interested in computing from this the quantities +// +// - AbsPostSplitRight(): the stats of the right hand side created by the split, +// i.e. the data taken over from the left hand side plus whatever was written to +// the right hand side in the process (metadata etc). We can recompute this, but +// try to avoid it unless necessary (when CombinedErrorDelta below is nonzero). +// - DeltaPostSplitLeft(): the stats delta that should be emitted by the split +// trigger itself, i.e. the data which the left hand side (initially comprising +// both halves) loses by moving data into the right hand side (including whatever +// DeltaBatch contained in contributions attributable to the keyspace on the +// left). 
+// - CombinedErrorDelta: the difference between (AbsPreSplitBoth+DeltaBatch) and +// the recomputation of the pre-split range including the batch. This is zero if +// neither of the inputs contains estimates. If it's not zero, we need to +// recompute from scratch to obtain AbsPostSplitRight. What's interesting about +// this quantity is that we never care what exactly it is, but we do care +// whether it's zero or not because if it's zero we get to do less work. // // Moreover, we want both neither of AbsPostSplit{Right,Left} to end up with // estimates. The way splits are set up right now, we sort of get this "for @@ -64,7 +64,7 @@ import "github.com/cockroachdb/cockroach/pkg/storage/enginepb" // because // // (1) AbsPreSplitBoth + DeltaBatch + DeltaRangeKey -// - CombinedErrorDelta = AbsPostSplitLeft + AbsPostSplitRight +// - CombinedErrorDelta = AbsPostSplitLeft + AbsPostSplitRight // // In words, this corresponds to "all bytes are accounted for": from the initial // stats that we have (accounting for the fact that AbsPreSplitBoth+DeltaBatch @@ -84,23 +84,23 @@ import "github.com/cockroachdb/cockroach/pkg/storage/enginepb" // These two equations are easily solved for the unknowns. First, we can express // DeltaPostSplitLeft() in known quantities via (2) as // -// DeltaPostSplitLeft() = AbsPostSplitLeft - AbsPreSplitBothEstimated. +// DeltaPostSplitLeft() = AbsPostSplitLeft - AbsPreSplitBothEstimated. // // Note that if we start out with estimates, DeltaPostSplitLeft() will wipe out // those estimates when added to the absolute stats. // // For AbsPostSplitRight(), there are two cases. First, due to the identity // -// CombinedErrorDelta = AbsPreSplitBothEstimated + DeltaBatchEstimated -// -(AbsPostSplitLeft + AbsPostSplitRight) -// + DeltaRangeKey. +// CombinedErrorDelta = AbsPreSplitBothEstimated + DeltaBatchEstimated +// -(AbsPostSplitLeft + AbsPostSplitRight) +// + DeltaRangeKey. // // and the fact that the second and third lines contain no estimates, we know // that CombinedErrorDelta is zero if the first line contains no estimates. // Using this, we can rearrange as // -// AbsPostSplitRight() = AbsPreSplitBoth + DeltaBatch - AbsPostSplitLeft -// + DeltaRangeKey. +// AbsPostSplitRight() = AbsPreSplitBoth + DeltaBatch - AbsPostSplitLeft +// + DeltaRangeKey. // // where all quantities on the right are known. If CombinedErrorDelta is // nonzero, we effectively have one more unknown in our linear system and we diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index 868aa426a7cb..f63805890c77 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -372,19 +372,19 @@ func TestTransferLeaseToVoterDemotingWithIncoming(t *testing.T) { // and makes sure that if lease transfer fails during a joint configuration // the previous leaseholder will successfully re-aquire the lease. // The test proceeds as follows: -// - Creates a range with 3 replicas n1, n2, n3, and makes sure the lease is on n1 -// - Makes sure lease transfers on this range fail from now on -// - Invokes AdminChangeReplicas to remove n1 and add n4 -// - This causes the range to go into a joint configuration. A lease transfer -// is attempted to move the lease from n1 to n4 before exiting the joint config, -// but that fails, causing us to remain in the joint configuration with the original -// leaseholder having revoked its lease, but everyone else thinking it's still -// the leaseholder. 
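The two split-stats identities above can be checked with plain arithmetic. A hedged sketch on a single int64 "stat" (the real MVCCStats has many fields plus a ContainsEstimates flag; names mirror the comment, and the CombinedErrorDelta != 0 recomputation path is omitted):

package splitsketch

// splitInputs holds the quantities known while the split trigger evaluates,
// reduced to a single int64 each for illustration.
type splitInputs struct {
	AbsPreSplitBothEstimated int64 // stats of the range before the batch
	DeltaBatchEstimated      int64 // stats delta from evaluating the batch
	AbsPostSplitLeft         int64 // recomputed stats of the left hand side
	DeltaRangeKey            int64 // delta for range keys straddling the split
}

// deltaPostSplitLeft is the delta the split trigger emits for the LHS:
// AbsPostSplitLeft - AbsPreSplitBothEstimated.
func (in splitInputs) deltaPostSplitLeft() int64 {
	return in.AbsPostSplitLeft - in.AbsPreSplitBothEstimated
}

// absPostSplitRight uses the identity that holds when CombinedErrorDelta is
// zero (i.e. no estimates on the inputs):
// AbsPostSplitRight = AbsPreSplitBoth + DeltaBatch - AbsPostSplitLeft + DeltaRangeKey.
func (in splitInputs) absPostSplitRight() int64 {
	return in.AbsPreSplitBothEstimated + in.DeltaBatchEstimated -
		in.AbsPostSplitLeft + in.DeltaRangeKey
}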
In this situation, only n1 can re-aquire the lease as long as it is live. -// - We re-enable lease transfers on this range. -// - n1 is able to re-aquire the lease, due to the fix in #83686 which enables a -// VOTER_DEMOTING_LEARNER (n1) replica to get the lease if there's also a VOTER_INCOMING -// which is the case here (n4). -// - n1 transfers the lease away and the range leaves the joint configuration. +// - Creates a range with 3 replicas n1, n2, n3, and makes sure the lease is on n1 +// - Makes sure lease transfers on this range fail from now on +// - Invokes AdminChangeReplicas to remove n1 and add n4 +// - This causes the range to go into a joint configuration. A lease transfer +// is attempted to move the lease from n1 to n4 before exiting the joint config, +// but that fails, causing us to remain in the joint configuration with the original +// leaseholder having revoked its lease, but everyone else thinking it's still +// the leaseholder. In this situation, only n1 can re-aquire the lease as long as it is live. +// - We re-enable lease transfers on this range. +// - n1 is able to re-aquire the lease, due to the fix in #83686 which enables a +// VOTER_DEMOTING_LEARNER (n1) replica to get the lease if there's also a VOTER_INCOMING +// which is the case here (n4). +// - n1 transfers the lease away and the range leaves the joint configuration. func TestTransferLeaseFailureDuringJointConfig(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index 283044bda94f..0e42078a19de 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -401,18 +401,17 @@ func mergeWithData(t *testing.T, retries int64) { // LHS is properly updated after a merge. The test contains a subtest for each // of the combinations of the following boolean options: // -// - disjointLeaseholders: configures whether or not the leaseholder of the +// - disjointLeaseholders: configures whether or not the leaseholder of the // LHS range is disjoint from the leaseholder of the RHS range. If false, // the leaseholders are collocated before the merge is initiated. // -// - throughSnapshot: configures whether or not the leaseholder of the LHS of +// - throughSnapshot: configures whether or not the leaseholder of the LHS of // the merge hears about and applies the merge through a Raft snapshot, as // opposed to through normal Raft log application. // -// - futureRead: configures whether or not the reads performed on the RHS range +// - futureRead: configures whether or not the reads performed on the RHS range // before the merge is initiated are performed in the future of present // time using synthetic timestamps. -// func TestStoreRangeMergeTimestampCache(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -2367,17 +2366,17 @@ func TestStoreRangeMergeConcurrentRequests(t *testing.T) { // // Consider the following sequence of events observed in a real cluster: // -// 1. Adjacent ranges Q and R are slated to be merged. Q has replicas on -// stores S1, S2, and S3, while R has replicas on S1, S2, and S4. -// 2. To collocate Q and R, the merge queue adds a replica of R on S3 and -// removes the replica on S4. The replica on S4 is queued for garbage -// collection, but is not yet processed. -// 3. The merge transaction commits, deleting R's range descriptor from the -// meta2 index. -// 4. The replica GC queue processes the former replica of R on S4. 
It -// performs a consistent lookup of R's start key in the meta2 index to -// determine whether the replica is still a member of R. Since R has been -// deleted, the lookup returns Q's range descriptor, not R's. +// 1. Adjacent ranges Q and R are slated to be merged. Q has replicas on +// stores S1, S2, and S3, while R has replicas on S1, S2, and S4. +// 2. To collocate Q and R, the merge queue adds a replica of R on S3 and +// removes the replica on S4. The replica on S4 is queued for garbage +// collection, but is not yet processed. +// 3. The merge transaction commits, deleting R's range descriptor from the +// meta2 index. +// 4. The replica GC queue processes the former replica of R on S4. It +// performs a consistent lookup of R's start key in the meta2 index to +// determine whether the replica is still a member of R. Since R has been +// deleted, the lookup returns Q's range descriptor, not R's. // // The replica GC queue would previously fail to notice that it had received Q's // range descriptor, not R's. It would then proceed to call store.RemoveReplica diff --git a/pkg/kv/kvserver/client_raft_helpers_test.go b/pkg/kv/kvserver/client_raft_helpers_test.go index 0f81c70f72d6..faaf20b7044c 100644 --- a/pkg/kv/kvserver/client_raft_helpers_test.go +++ b/pkg/kv/kvserver/client_raft_helpers_test.go @@ -217,11 +217,11 @@ type testClusterPartitionedRange struct { // out messages from the partitioned store. When activated the configuration // looks like: // -// [p] -// x x -// / \ -// x x -// [*]<---->[*] +// [p] +// x x +// / \ +// x x +// [*]<---->[*] // // The activated argument controls whether the partition is activated when this // function returns. diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index 4a77ab618cdf..d2f70b4e8d00 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -1516,11 +1516,11 @@ func TestReplicateAfterRemoveAndSplit(t *testing.T) { // Test that when a Raft group is not able to establish a quorum, its Raft log // does not grow without bound. It tests two different scenarios where this used // to be possible (see #27772): -// 1. The leader proposes a command and cannot establish a quorum. The leader -// continually re-proposes the command. -// 2. The follower proposes a command and forwards it to the leader, who cannot -// establish a quorum. The follower continually re-proposes and forwards the -// command to the leader. +// 1. The leader proposes a command and cannot establish a quorum. The leader +// continually re-proposes the command. +// 2. The follower proposes a command and forwards it to the leader, who cannot +// establish a quorum. The follower continually re-proposes and forwards the +// command to the leader. func TestLogGrowthWhenRefreshingPendingCommands(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -4948,25 +4948,25 @@ func TestAckWriteBeforeApplication(t *testing.T) { // // Given this behavior there are 4 troubling cases with regards to splits. // -// * In all cases we begin with s1 processing a presplit snapshot for -// r20. After the split the store should have r21/3. +// - In all cases we begin with s1 processing a presplit snapshot for +// r20. After the split the store should have r21/3. // // In the first two cases the following occurs: // -// * s1 receives a message for r21/3 prior to acquiring the split lock -// in r21. This will create an uninitialized r21/3 which may write -// HardState. 
+// - s1 receives a message for r21/3 prior to acquiring the split lock +// in r21. This will create an uninitialized r21/3 which may write +// HardState. // -// * Before the r20 processes the split r21 is removed and re-added to -// s1 as r21/4. s1 receives a raft message destined for r21/4 and proceeds -// to destroy its uninitialized r21/3, laying down a tombstone at 4 in the -// process. +// - Before the r20 processes the split r21 is removed and re-added to +// s1 as r21/4. s1 receives a raft message destined for r21/4 and proceeds +// to destroy its uninitialized r21/3, laying down a tombstone at 4 in the +// process. // -// (1) s1 processes the split and finds the RHS to be an uninitialized replica -// with a higher replica ID. +// (1) s1 processes the split and finds the RHS to be an uninitialized replica +// with a higher replica ID. // -// (2) s1 crashes before processing the split, forgetting the replica ID of the -// RHS but retaining its tombstone. +// (2) s1 crashes before processing the split, forgetting the replica ID of the +// RHS but retaining its tombstone. // // In both cases we know that the RHS could not have committed anything because // it cannot have gotten a snapshot but we want to be sure to not synthesize a @@ -4975,28 +4975,27 @@ func TestAckWriteBeforeApplication(t *testing.T) { // // In the third and fourth cases: // -// * s1 never receives a message for r21/3. +// - s1 never receives a message for r21/3. // -// * Before the r20 processes the split r21 is removed and re-added to -// s1 as r21/4. s1 receives a raft message destined for r21/4 and has never -// heard about r21/3. +// - Before the r20 processes the split r21 is removed and re-added to +// s1 as r21/4. s1 receives a raft message destined for r21/4 and has never +// heard about r21/3. // -// (3) s1 processes the split and finds the RHS to be an uninitialized replica -// with a higher replica ID (but without a tombstone). This case is very -// similar to (1) +// (3) s1 processes the split and finds the RHS to be an uninitialized replica +// with a higher replica ID (but without a tombstone). This case is very +// similar to (1) // -// (4) s1 crashes still before processing the split, forgetting that it had -// known about r21/4. When it reboots r21/4 is totally partitioned and -// r20 becomes unpartitioned. +// (4) s1 crashes still before processing the split, forgetting that it had +// known about r21/4. When it reboots r21/4 is totally partitioned and +// r20 becomes unpartitioned. // -// * r20 processes the split successfully and initialized r21/3. +// - r20 processes the split successfully and initialized r21/3. // // In the 4th case we find that until we unpartition r21/4 (the RHS) and let it // learn about its removal with a ReplicaTooOldError that it will be initialized // with a CommitIndex at 10 as r21/3, the split's value. After r21/4 becomes // unpartitioned it will learn it is removed by either catching up on its // its log or receiving a ReplicaTooOldError which will lead to a tombstone. -// func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index 49bb8d260482..64e702d94149 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -220,26 +220,26 @@ func TestLeaseholdersRejectClockUpdateWithJump(t *testing.T) { // overrides an old value. 
The test uses a "Writer" and a "Reader" // to reproduce an out-of-order put. // -// 1) The Writer executes a cput operation and writes a write intent with -// time T in a txn. -// 2) Before the Writer's txn is committed, the Reader sends a high priority -// get operation with time T+100. This pushes the Writer txn timestamp to -// T+100. The Reader also writes to the same key the Writer did a cput to -// in order to trigger the restart of the Writer's txn. The original -// write intent timestamp is also updated to T+100. -// 3) The Writer starts a new epoch of the txn, but before it writes, the -// Reader sends another high priority get operation with time T+200. This -// pushes the Writer txn timestamp to T+200 to trigger a restart of the -// Writer txn. The Writer will not actually restart until it tries to commit -// the current epoch of the transaction. The Reader updates the timestamp of -// the write intent to T+200. The test deliberately fails the Reader get -// operation, and cockroach doesn't update its timestamp cache. -// 4) The Writer executes the put operation again. This put operation comes -// out-of-order since its timestamp is T+100, while the intent timestamp -// updated at Step 3 is T+200. -// 5) The put operation overrides the old value using timestamp T+100. -// 6) When the Writer attempts to commit its txn, the txn will be restarted -// again at a new epoch timestamp T+200, which will finally succeed. +// 1. The Writer executes a cput operation and writes a write intent with +// time T in a txn. +// 2. Before the Writer's txn is committed, the Reader sends a high priority +// get operation with time T+100. This pushes the Writer txn timestamp to +// T+100. The Reader also writes to the same key the Writer did a cput to +// in order to trigger the restart of the Writer's txn. The original +// write intent timestamp is also updated to T+100. +// 3. The Writer starts a new epoch of the txn, but before it writes, the +// Reader sends another high priority get operation with time T+200. This +// pushes the Writer txn timestamp to T+200 to trigger a restart of the +// Writer txn. The Writer will not actually restart until it tries to commit +// the current epoch of the transaction. The Reader updates the timestamp of +// the write intent to T+200. The test deliberately fails the Reader get +// operation, and cockroach doesn't update its timestamp cache. +// 4. The Writer executes the put operation again. This put operation comes +// out-of-order since its timestamp is T+100, while the intent timestamp +// updated at Step 3 is T+200. +// 5. The put operation overrides the old value using timestamp T+100. +// 6. When the Writer attempts to commit its txn, the txn will be restarted +// again at a new epoch timestamp T+200, which will finally succeed. func TestTxnPutOutOfOrder(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -508,17 +508,17 @@ func TestTxnReadWithinUncertaintyInterval(t *testing.T) { // This is a regression test for #36431. Before this issue was addressed, // it was possible for the following series of events to lead to a stale // read: -// - txn W is coordinated by node B. It lays down an intent on node A (key k) at -// ts 95. -// - txn W gets pushed to ts 105 (taken from B's clock). It refreshes -// successfully and commits at 105. Node A's clock is at, say, 100; this is -// within clock offset bounds. -// - after all this, txn R starts on node A. It gets assigned ts 100. The txn -// has no uncertainty for node A. 
-// - txn W's async intent resolution comes around and resolves the intent on -// node A, moving the value fwd from ts 95 to 105. -// - txn R reads key k and doesn't see anything. There's a value at 105, but the -// txn have no uncertainty due to an observed timestamp. This is a stale read. +// - txn W is coordinated by node B. It lays down an intent on node A (key k) at +// ts 95. +// - txn W gets pushed to ts 105 (taken from B's clock). It refreshes +// successfully and commits at 105. Node A's clock is at, say, 100; this is +// within clock offset bounds. +// - after all this, txn R starts on node A. It gets assigned ts 100. The txn +// has no uncertainty for node A. +// - txn W's async intent resolution comes around and resolves the intent on +// node A, moving the value fwd from ts 95 to 105. +// - txn R reads key k and doesn't see anything. There's a value at 105, but the +// txn have no uncertainty due to an observed timestamp. This is a stale read. // // The test's rangedResolution parameter dictates whether the intent is // asynchronously resolved using point or ranged intent resolution. @@ -531,7 +531,6 @@ func TestTxnReadWithinUncertaintyInterval(t *testing.T) { // The test's alreadyResolved parameter dictates whether the intent is // already resolved by the time the reader observes it, or whether the // reader must resolve the intent itself. -// func TestTxnReadWithinUncertaintyIntervalAfterIntentResolution(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -3234,14 +3233,14 @@ func TestChangeReplicasSwapVoterWithNonVoter(t *testing.T) { // them to be. Tombstones are laid down when replicas are removed. // Replicas are removed for several reasons: // -// (1) In response to a ChangeReplicasTrigger which removes it. -// (2) In response to a ReplicaTooOldError from a sent raft message. -// (3) Due to the replica GC queue detecting a replica is not in the range. -// (3.1) When the replica detects the range has been merged away. -// (4) Due to a raft message addressed to a newer replica ID. -// (4.1) When the older replica is not initialized. -// (5) Due to a merge. -// (6) Due to snapshot which subsumes a range. +// (1) In response to a ChangeReplicasTrigger which removes it. +// (2) In response to a ReplicaTooOldError from a sent raft message. +// (3) Due to the replica GC queue detecting a replica is not in the range. +// (3.1) When the replica detects the range has been merged away. +// (4) Due to a raft message addressed to a newer replica ID. +// (4.1) When the older replica is not initialized. +// (5) Due to a merge. +// (6) Due to snapshot which subsumes a range. // // This test creates all of these scenarios and ensures that tombstones are // written at sane values. @@ -3936,23 +3935,23 @@ func TestChangeReplicasLeaveAtomicRacesWithMerge(t *testing.T) { // At the time of writing this test were three hazardous cases which are now // avoided: // -// (1) The outgoing leaseholder learns about its removal before applying the -// lease transfer. This could happen if it has a lot left to apply but it -// does indeed know in its log that it is either no longer the leaseholder -// or that some of its commands will apply successfully. +// (1) The outgoing leaseholder learns about its removal before applying the +// lease transfer. This could happen if it has a lot left to apply but it +// does indeed know in its log that it is either no longer the leaseholder +// or that some of its commands will apply successfully. 
// -// (2) The replica learns about its removal after applying the lease transfer -// but it potentially still has pending commands which it thinks might -// have been proposed. This can occur if there are commands which are -// proposed after the lease transfer has been proposed but before the lease -// transfer has applied. This can also occur if commands are re-ordered -// by raft due to a leadership change. +// (2) The replica learns about its removal after applying the lease transfer +// but it potentially still has pending commands which it thinks might +// have been proposed. This can occur if there are commands which are +// proposed after the lease transfer has been proposed but before the lease +// transfer has applied. This can also occur if commands are re-ordered +// by raft due to a leadership change. // -// (3) The replica learns about its removal after applying the lease transfer -// but proposed a command evaluated under the old lease after the lease -// transfer has been applied. This can occur if there are commands evaluate -// before the lease transfer is proposed but are not inserted into the -// proposal buffer until after it has been applied. +// (3) The replica learns about its removal after applying the lease transfer +// but proposed a command evaluated under the old lease after the lease +// transfer has been applied. This can occur if there are commands evaluate +// before the lease transfer is proposed but are not inserted into the +// proposal buffer until after it has been applied. // // None of these cases are possible any longer as latches now prevent writes // from occurring concurrently with TransferLeaseRequests. (1) is prevented @@ -4479,7 +4478,8 @@ func TestProposalOverhead(t *testing.T) { // hit an assertion failure. It used to. // // The test uses a TestCluster to mirror the setup from: -// concurrency/testdata/concurrency_manager/discover_lock_after_lease_race +// +// concurrency/testdata/concurrency_manager/discover_lock_after_lease_race func TestDiscoverIntentAcrossLeaseTransferAwayAndBack(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -4647,10 +4647,10 @@ func makeReplicationTargets(ids ...int) (targets []roachpb.ReplicationTarget) { // TestTenantID tests that the tenant ID is properly set. // This test examines the following behaviors: // -// (1) When range is split off for a tenant, that it gets the right tenant ID. -// (2) When a replica is created with a raft message, it does not have a -// tenant ID, but then when it is initialized, it gets one. -// (3) When a store starts up, it assigns the right tenant ID. +// (1) When range is split off for a tenant, that it gets the right tenant ID. +// (2) When a replica is created with a raft message, it does not have a +// tenant ID, but then when it is initialized, it gets one. +// (3) When a store starts up, it assigns the right tenant ID. func TestTenantID(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -5202,9 +5202,9 @@ func BenchmarkOptimisticEvalForLocks(b *testing.B) { } // BenchmarkOptimisticEval benchmarks optimistic evaluation with -// - potentially conflicting latches held by 1PC transactions doing writes. -// - potentially conflicting latches or locks held by transactions doing -// writes. +// - potentially conflicting latches held by 1PC transactions doing writes. +// - potentially conflicting latches or locks held by transactions doing +// writes. 
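The comment rewrites in the hunks above are all instances of one mechanical change: Go 1.19's gofmt reformats doc comments to the go/doc/comment syntax, turning "1)"-style steps into "1." items with indented wrap lines and "*" bullets into "-" items. Below is a minimal sketch of the resulting shape; the package and function names are invented for illustration, and only the comment layout is the point.

```go
package docfmtexample

// runScenario exercises the reordering in three steps:
//  1. the Writer lays down an intent at time T inside a txn.
//  2. the Reader pushes the Writer's txn timestamp forward, forcing
//     the Writer to restart at a higher epoch.
//  3. the Writer retries and the commit finally succeeds.
//
// Before Go 1.19 the same list would typically have been written with
// "1)" markers and flush-left wrap lines, which go/doc flowed into one
// flat paragraph; gofmt now normalizes the markers to "1." and indents
// the wrapped lines so the list structure survives rendering.
func runScenario() {}
```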
func BenchmarkOptimisticEval(b *testing.B) { defer log.Scope(b).Close(b) ctx := context.Background() diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 2ca7c6ecb396..b9148e87236f 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -3250,22 +3250,32 @@ func TestStoreSplitDisappearingReplicas(t *testing.T) { // // Or, in pictures (s3 looks like s1 throughout and is omitted): // -// s1: [----r1@all-------------] -// s2: [----r1@all-------------] +// s1: [----r1@all-------------] +// s2: [----r1@all-------------] +// // Remove s2: -// s1: [----r1@s1s3------------] -// s2: [----r1@all-------------] (outdated) +// +// s1: [----r1@s1s3------------] +// s2: [----r1@all-------------] (outdated) +// // Split r1: -// s1: [-r1@s1s3-|--r2@s1s3----] -// s2: [----r1@all-------------] (outdated) +// +// s1: [-r1@s1s3-|--r2@s1s3----] +// s2: [----r1@all-------------] (outdated) +// // Add s2: -// s1: [-r1@all-|--r2@s1s3-----] -// s2: [----r1@all-------------] (outdated) +// +// s1: [-r1@all-|--r2@s1s3-----] +// s2: [----r1@all-------------] (outdated) +// // Add learner to s2 on r2 (remains uninitialized due to LHS state blocking it): -// s1: [-r1@s1s3-|--r2@all-----] -// s2: [----r1@all-------------] (outdated), uninitialized replica r2/3 +// +// s1: [-r1@s1s3-|--r2@all-----] +// s2: [----r1@all-------------] (outdated), uninitialized replica r2/3 +// // Remove and re-add learner multiple times: r2/3 becomes r2/100 -// (diagram looks the same except for replacing r2/3) +// +// (diagram looks the same except for replacing r2/3) // // When connectivity is restored, r1@s2 will start to catch up on the raft log // after it learns of its new replicaID. It first processes the replication diff --git a/pkg/kv/kvserver/client_tenant_test.go b/pkg/kv/kvserver/client_tenant_test.go index 2dcd299b6d6b..e47bd45ef276 100644 --- a/pkg/kv/kvserver/client_tenant_test.go +++ b/pkg/kv/kvserver/client_tenant_test.go @@ -43,8 +43,8 @@ import ( // TestTenantsStorageMetrics ensures that tenant storage metrics are properly // set upon split. There's two interesting cases: // -// 1) The common case where the RHS and LHS of the split are co-located -// 2) The rare case where the RHS of the split has already been removed from +// 1. The common case where the RHS and LHS of the split are co-located +// 2. The rare case where the RHS of the split has already been removed from // the store by the time the LHS applies the split. // // This test at time of writing only deals with ensuring that 1) is covered. diff --git a/pkg/kv/kvserver/client_test.go b/pkg/kv/kvserver/client_test.go index e3060da7efb8..634c7f86ba2e 100644 --- a/pkg/kv/kvserver/client_test.go +++ b/pkg/kv/kvserver/client_test.go @@ -8,7 +8,9 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -/* Package storage_test provides a means of testing store +/* + Package storage_test provides a means of testing store + functionality which depends on a fully-functional KV client. This cannot be done within the storage package because of circular dependencies. 
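The client_split_test.go diagrams above and the closedts/tracker.go pseudo-code just below are reshaped by the same rule: under the new doc comment syntax an indented comment line is preformatted text, so gofmt sets such blocks off with blank "//" lines and a leading tab. A small illustrative sketch follows; the names in it are invented, not taken from the patch.

```go
package docfmtexample

// Lock ordering for the tracker, kept as a preformatted block so that
// go/doc preserves the layout instead of reflowing it into prose:
//
//	externalLock.Lock()
//	for each in-flight proposal:
//		tracker.Untrack(tok)
//	externalLock.Unlock()
//
// The same rule explains the extra blank "//" lines and indentation
// added around the ASCII range diagrams in client_split_test.go: any
// indented line inside a doc comment is treated as preformatted text.
func trackerProtocolDoc() {}
```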
diff --git a/pkg/kv/kvserver/closedts/tracker/tracker.go b/pkg/kv/kvserver/closedts/tracker/tracker.go index e00f1b41af7f..c39f40b46e39 100644 --- a/pkg/kv/kvserver/closedts/tracker/tracker.go +++ b/pkg/kv/kvserver/closedts/tracker/tracker.go @@ -43,7 +43,9 @@ import ( // // externalLock.Lock() // for each command being proposed: -// Tracker.Untrack(tok) +// +// Tracker.Untrack(tok) +// // newClosedTimestamp := min(now() - kv.closed_timestamp.target_duration, Tracker.LowerBound() - 1) // externalLock.Unlock() // diff --git a/pkg/kv/kvserver/concurrency/concurrency_control.go b/pkg/kv/kvserver/concurrency/concurrency_control.go index ee640416c7a3..2358b32efa42 100644 --- a/pkg/kv/kvserver/concurrency/concurrency_control.go +++ b/pkg/kv/kvserver/concurrency/concurrency_control.go @@ -66,54 +66,54 @@ import ( // ignore any queue that has formed on the lock. For other exceptions, see the // later comment for lockTable. // -// Internal Components +// # Internal Components // // The concurrency manager is composed of a number of internal synchronization, // bookkeeping, and queueing structures. Each of these is discussed in more // detail on their interface definition. The following diagram details how the // components are tied together: // -// +---------------------+---------------------------------------------+ -// | concurrency.Manager | | -// +---------------------+ | -// | | -// +------------+ acquire +--------------+ acquire | -// Sequence() |--->--->---| latchManager |<---<---<---<---<---<---+ | -// +------------+ +--------------+ | | -// | / check locks + wait queues | | -// | v if conflict, enter q & drop latches ^ | -// | +---------------------------------------------------+ | | -// | | [ lockTable ] | | | -// | | [ key1 ] -------------+-----------------+ | ^ | -// | | [ key2 ] / lockState: | lockWaitQueue: |----<---<---<----+ -// | | [ key3 ]-{ - lock type | +-[a]<-[b]<-[c] | | | | | -// | | [ key4 ] \ - txn meta | | (no latches) |-->-^ | | -// | | [ key5 ] -------------+-|---------------+ | | | -// | | [ ... ] v | | ^ -// | +---------------------------------|-----------------+ | | if lock found, HandleWriterIntentError() -// | | | | | - enter lockWaitQueue -// | | +- may be remote -+--+ | | - drop latches -// | | | | | | - wait for lock update / release -// | v v ^ | | -// | | +--------------------------+ | ^ -// | | | txnWaitQueue: | | | -// | | | (located on txn record's | | | -// | v | leaseholder replica) | | | -// | | |--------------------------| | ^ -// | | | [txn1] [txn2] [txn3] ... |----<---<---<---<----+ | -// | | +--------------------------+ | | if txn push failed, HandleTransactionPushError() -// | | | | - enter txnWaitQueue -// | | | ^ - drop latches -// | | | | - wait for txn record update -// | | | | | -// | | | | | -// | +--> retain latches --> remain at head of queues ---> evaluate ---> Finish() -// | | -// +----------+ | -// Finish() | ---> exit wait queues ---> drop latches -----------------> respond ... 
-// +----------+ | -// | | -// +-------------------------------------------------------------------+ +// +---------------------+---------------------------------------------+ +// | concurrency.Manager | | +// +---------------------+ | +// | | +// +------------+ acquire +--------------+ acquire | +// Sequence() |--->--->---| latchManager |<---<---<---<---<---<---+ | +// +------------+ +--------------+ | | +// | / check locks + wait queues | | +// | v if conflict, enter q & drop latches ^ | +// | +---------------------------------------------------+ | | +// | | [ lockTable ] | | | +// | | [ key1 ] -------------+-----------------+ | ^ | +// | | [ key2 ] / lockState: | lockWaitQueue: |----<---<---<----+ +// | | [ key3 ]-{ - lock type | +-[a]<-[b]<-[c] | | | | | +// | | [ key4 ] \ - txn meta | | (no latches) |-->-^ | | +// | | [ key5 ] -------------+-|---------------+ | | | +// | | [ ... ] v | | ^ +// | +---------------------------------|-----------------+ | | if lock found, HandleWriterIntentError() +// | | | | | - enter lockWaitQueue +// | | +- may be remote -+--+ | | - drop latches +// | | | | | | - wait for lock update / release +// | v v ^ | | +// | | +--------------------------+ | ^ +// | | | txnWaitQueue: | | | +// | | | (located on txn record's | | | +// | v | leaseholder replica) | | | +// | | |--------------------------| | ^ +// | | | [txn1] [txn2] [txn3] ... |----<---<---<---<----+ | +// | | +--------------------------+ | | if txn push failed, HandleTransactionPushError() +// | | | | - enter txnWaitQueue +// | | | ^ - drop latches +// | | | | - wait for txn record update +// | | | | | +// | | | | | +// | +--> retain latches --> remain at head of queues ---> evaluate ---> Finish() +// | | +// +----------+ | +// Finish() | ---> exit wait queues ---> drop latches -----------------> respond ... +// +----------+ | +// | | +// +-------------------------------------------------------------------+ // // See the comments on individual components for a more detailed look at their // interface and inner-workings. @@ -351,10 +351,10 @@ type TestingAccessor interface { // // The setting can change across different calls to SequenceReq. The // permissible sequences are: -// - OptimisticEval: when optimistic evaluation succeeds. -// - OptimisticEval, PessimisticAfterFailedOptimisticEval, PessimisticEval*: -// when optimistic evaluation failed. -// - PessimisticEval+: when only pessimistic evaluation was attempted. +// - OptimisticEval: when optimistic evaluation succeeds. +// - OptimisticEval, PessimisticAfterFailedOptimisticEval, PessimisticEval*: +// when optimistic evaluation failed. +// - PessimisticEval+: when only pessimistic evaluation was attempted. type RequestEvalKind int const ( @@ -532,15 +532,15 @@ type latchGuard interface{} // it, where conflicting transactions can queue while waiting for the lock to be // released. // -// +---------------------------------------------------+ -// | [ lockTable ] | -// | [ key1 ] -------------+-----------------+ | -// | [ key2 ] / lockState: | lockWaitQueue: | | -// | [ key3 ]-{ - lock type | <-[a]<-[b]<-[c] | | -// | [ key4 ] \ - txn meta | | | -// | [ key5 ] -------------+-----------------+ | -// | [ ... 
] | -// +---------------------------------------------------+ +// +---------------------------------------------------+ +// | [ lockTable ] | +// | [ key1 ] -------------+-----------------+ | +// | [ key2 ] / lockState: | lockWaitQueue: | | +// | [ key3 ]-{ - lock type | <-[a]<-[b]<-[c] | | +// | [ key4 ] \ - txn meta | | | +// | [ key5 ] -------------+-----------------+ | +// | [ ... ] | +// +---------------------------------------------------+ // // The database is read and written using "requests". Transactions are composed // of one or more requests. Isolation is needed across requests. Additionally, @@ -575,15 +575,14 @@ type latchGuard interface{} // conflict then the request that arrived first will typically be sequenced // first. There are some exceptions: // -// - a request that is part of a transaction which has already acquired a lock -// does not need to wait on that lock during sequencing, and can therefore -// ignore any queue that has formed on the lock. -// -// - contending requests that encounter different levels of contention may be -// sequenced in non-FIFO order. This is to allow for more concurrency. e.g. -// if request R1 and R2 contend on key K2, but R1 is also waiting at key K1, -// R2 could slip past R1 and evaluate. +// - a request that is part of a transaction which has already acquired a lock +// does not need to wait on that lock during sequencing, and can therefore +// ignore any queue that has formed on the lock. // +// - contending requests that encounter different levels of contention may be +// sequenced in non-FIFO order. This is to allow for more concurrency. e.g. +// if request R1 and R2 contend on key K2, but R1 is also waiting at key K1, +// R2 could slip past R1 and evaluate. type lockTable interface { requestQueuer @@ -788,10 +787,10 @@ type lockTableGuard interface { // that it is a part of. // // This waiting state responds to a set of state transitions in the lock table: -// - a conflicting lock is released -// - a conflicting lock is updated such that it no longer conflicts -// - a conflicting request in the lock wait-queue acquires the lock -// - a conflicting request in the lock wait-queue exits the lock wait-queue +// - a conflicting lock is released +// - a conflicting lock is updated such that it no longer conflicts +// - a conflicting request in the lock wait-queue acquires the lock +// - a conflicting request in the lock wait-queue exits the lock wait-queue // // These state transitions are typically reactive - the waiter can simply wait // for locks to be released or lock wait-queues to be exited by other actors. @@ -844,9 +843,9 @@ type lockTableWaiter interface { // // The first of these situations is failure of the conflicting transaction's // coordinator. This situation comes in two flavors: -// - before a transaction has been finalized (committed or aborted) -// - after a transaction has been finalized but before all of its intents have -// been resolved +// - before a transaction has been finalized (committed or aborted) +// - after a transaction has been finalized but before all of its intents have +// been resolved // // In the first of these flavors, the transaction record may still have a // PENDING status. Without a live transaction coordinator heartbeating it, the @@ -880,64 +879,76 @@ type lockTableWaiter interface { // able to observe a full cycle in this graph and aborts one of the transactions // in the cycle to break the deadlock. 
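The section titles in concurrency_control.go ("Internal Components" above, "Example of Distributed Deadlock Detection" just below) gain a "# " prefix, the Go 1.19 doc comment heading syntax. A minimal sketch of how such a heading reads in an ordinary doc comment; the package name here is illustrative.

```go
// Package docfmtexample illustrates the heading syntax used in the
// hunks above: a comment line that starts with "# " and is set off by
// blank comment lines is rendered as a section heading by go/doc.
//
// # Internal Components
//
// As these hunks show, gofmt promotes an old implicit heading (a short
// capitalized line surrounded by blank comment lines) to the explicit
// "# " form, which is why the section titles gained the prefix.
package docfmtexample
```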
// -// Example of Distributed Deadlock Detection +// # Example of Distributed Deadlock Detection // // The following diagram demonstrates how the txnWaitQueue interacts with // distributed deadlock detection. // // - txnA enters txnB's txnWaitQueue during a PushTxn request (MaybeWaitForPush) +// // - txnB enters txnC's txnWaitQueue during a PushTxn request (MaybeWaitForPush) +// // - txnC enters txnA's txnWaitQueue during a PushTxn request (MaybeWaitForPush) // -// .-----------------------------------. -// | | -// v | -// [txnA record] --> [txnB record] --> [txnC record] +// .-----------------------------------. +// | | +// v | +// [txnA record] --> [txnB record] --> [txnC record] // deps: deps: deps: -// - txnC - txnA - txnB +// +// - txnC - txnA - txnB // // - txnA queries its own txnWaitQueue using a QueryTxn request (MaybeWaitForQuery) // -// .-----------------------------------. -// | ............ | -// v v . | -// [txnA record] --> [txnB record] --> [txnC record] +// .-----------------------------------. +// | ............ | +// v v . | +// [txnA record] --> [txnB record] --> [txnC record] // deps: deps: deps: -// - txnC - txnA - txnB +// +// - txnC - txnA - txnB // // - txnA finds that txnC is a dependent. It transfers this dependency to txnB // -// .-----------------------------------. -// | | -// v | -// [txnA record] --> [txnB record] --> [txnC record] +// .-----------------------------------. +// | | +// v | +// [txnA record] --> [txnB record] --> [txnC record] // deps: deps: deps: -// - txnC - txnA - txnB -// - txnC +// +// - txnC - txnA - txnB +// +// - txnC // // - txnC queries its own txnWaitQueue using a QueryTxn request (MaybeWaitForQuery) +// // - txnB queries its own txnWaitQueue using a QueryTxn request (MaybeWaitForQuery) +// // - txnC finds that txnB is a dependent. It transfers this dependency to txnA +// // - txnB finds that txnA and txnC are dependents. It transfers these dependencies to txnC // -// .-----------------------------------. -// | | -// v | -// [txnA record] --> [txnB record] --> [txnC record] +// .-----------------------------------. +// | | +// v | +// [txnA record] --> [txnB record] --> [txnC record] // deps: deps: deps: -// - txnC - txnA - txnB -// - txnB - txnC - txnA -// - txnC +// +// - txnC - txnA - txnB +// +// - txnB - txnC - txnA +// +// - txnC // // - txnB notices that txnC is a transitive dependency of itself. This indicates // a cycle in the global wait-for graph. txnC is aborted, breaking the cycle // and the deadlock // -// [txnA record] --> [txnB record] --> [txnC record: ABORTED] +// [txnA record] --> [txnB record] --> [txnC record: ABORTED] // // - txnC releases its locks and the transactions proceed in order. 
// -// [txnA record] --> [txnB record] --> (free to commit) +// [txnA record] --> [txnB record] --> (free to commit) // // TODO(nvanbenschoten): if we exposed a "queue guard" interface, we could make // stronger guarantees around cleaning up enqueued txns when there are no diff --git a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go index 86f817b5797c..e81b86615cf1 100644 --- a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go +++ b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go @@ -15,7 +15,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "reflect" "regexp" "runtime" @@ -57,7 +56,9 @@ import ( // // new-txn name= ts=[,] [epoch=] [priority] [uncertainty-limit=[,]] // new-request name= txn=|none ts=[,] [priority] [inconsistent] [wait-policy=] [lock-timeout] [max-lock-wait-queue-length=] [poison-policy=[err|wait]] -// [=...] (hint: see scanSingleRequest) +// +// [=...] (hint: see scanSingleRequest) +// // sequence req= [eval-kind= // finish req= @@ -85,7 +86,6 @@ import ( // debug-set-discovered-locks-threshold-to-consult-finalized-txn-cache n= // debug-set-max-locks n= // reset -// func TestConcurrencyManagerBasic(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1297,7 +1297,7 @@ var goroutineStalledStates = map[string]bool{ // allocations. func goroutineStatus(t *testing.T, filter string, buf *[]byte) []*stack.Goroutine { b := stacks(buf) - s, _, err := stack.ScanSnapshot(bytes.NewBuffer(b), ioutil.Discard, stack.DefaultOpts()) + s, _, err := stack.ScanSnapshot(bytes.NewBuffer(b), io.Discard, stack.DefaultOpts()) if err != io.EOF { t.Fatalf("could not parse goroutine dump: %v", err) return nil diff --git a/pkg/kv/kvserver/concurrency/lock_table.go b/pkg/kv/kvserver/concurrency/lock_table.go index a213bd54d659..8a3d2b6c71e6 100644 --- a/pkg/kv/kvserver/concurrency/lock_table.go +++ b/pkg/kv/kvserver/concurrency/lock_table.go @@ -169,9 +169,10 @@ type treeMu struct { // thread. // // Mutex ordering: lockTableImpl.enabledMu -// > treeMu.mu -// > lockState.mu -// > lockTableGuardImpl.mu +// +// > treeMu.mu +// > lockState.mu +// > lockTableGuardImpl.mu type lockTableImpl struct { // The ID of the range to which this replica's lock table belongs. // Used to populate results when querying the lock table. @@ -316,46 +317,46 @@ func (t *lockTableImpl) setMaxLocks(maxLocks int64) { // transitions where the transitions are notified via newState() and the current // state can be read using CurState(). // -// - The waitFor* states provide information on who the request is waiting for. -// The waitForDistinguished state is a sub-case -- a distinguished waiter is -// responsible for taking extra actions e.g. immediately pushing the transaction -// it is waiting for. The implementation ensures that if there are multiple -// requests in waitFor state waiting on the same transaction at least one will -// be a distinguished waiter. +// - The waitFor* states provide information on who the request is waiting for. +// The waitForDistinguished state is a sub-case -- a distinguished waiter is +// responsible for taking extra actions e.g. immediately pushing the transaction +// it is waiting for. The implementation ensures that if there are multiple +// requests in waitFor state waiting on the same transaction at least one will +// be a distinguished waiter. // -// TODO(sbhola): investigate removing the waitForDistinguished state which -// will simplify the code here. 
All waitFor requests would wait (currently -// 50ms) before pushing the transaction (for deadlock detection) they are -// waiting on, say T. Typically T will be done before 50ms which is considered -// ok: the one exception we will need to make is if T has the min priority or -// the waiting transaction has max priority -- in both cases it will push -// immediately. The bad case is if T is ABORTED: the push will succeed after, -// and if T left N intents, each push would wait for 50ms, incurring a latency -// of 50*N ms. A cache of recently encountered ABORTED transactions on each -// Store should mitigate this latency increase. Whenever a transaction sees a -// waitFor state, it will consult this cache and if T is found, push -// immediately (if there isn't already a push in-flight) -- even if T is not -// initially in the cache, the first push will place it in the cache, so the -// maximum latency increase is 50ms. +// TODO(sbhola): investigate removing the waitForDistinguished state which +// will simplify the code here. All waitFor requests would wait (currently +// 50ms) before pushing the transaction (for deadlock detection) they are +// waiting on, say T. Typically T will be done before 50ms which is considered +// ok: the one exception we will need to make is if T has the min priority or +// the waiting transaction has max priority -- in both cases it will push +// immediately. The bad case is if T is ABORTED: the push will succeed after, +// and if T left N intents, each push would wait for 50ms, incurring a latency +// of 50*N ms. A cache of recently encountered ABORTED transactions on each +// Store should mitigate this latency increase. Whenever a transaction sees a +// waitFor state, it will consult this cache and if T is found, push +// immediately (if there isn't already a push in-flight) -- even if T is not +// initially in the cache, the first push will place it in the cache, so the +// maximum latency increase is 50ms. // -// - The waitElsewhere state is a rare state that is used when the lockTable is -// under memory pressure and is clearing its internal queue state. Like the -// waitFor* states, it informs the request who it is waiting for so that -// deadlock detection works. However, sequencing information inside the -// lockTable is mostly discarded. +// - The waitElsewhere state is a rare state that is used when the lockTable is +// under memory pressure and is clearing its internal queue state. Like the +// waitFor* states, it informs the request who it is waiting for so that +// deadlock detection works. However, sequencing information inside the +// lockTable is mostly discarded. // -// - The waitSelf state is a rare state when a different request from the same -// transaction has a reservation. See the comment about "Reservations" in -// lockState. +// - The waitSelf state is a rare state when a different request from the same +// transaction has a reservation. See the comment about "Reservations" in +// lockState. // -// - The waitQueueMaxLengthExceeded state is used to indicate that the request -// was rejected because it attempted to enter a lock wait-queue as a writer -// and found that the queue's length was already equal to or exceeding the -// request's configured maximum. +// - The waitQueueMaxLengthExceeded state is used to indicate that the request +// was rejected because it attempted to enter a lock wait-queue as a writer +// and found that the queue's length was already equal to or exceeding the +// request's configured maximum. 
// -// - The doneWaiting state is used to indicate that the request should make -// another call to ScanAndEnqueue() (that next call is more likely to return a -// lockTableGuard that returns false from StartWaiting()). +// - The doneWaiting state is used to indicate that the request should make +// another call to ScanAndEnqueue() (that next call is more likely to return a +// lockTableGuard that returns false from StartWaiting()). type lockTableGuardImpl struct { seqNum uint64 lt *lockTableImpl @@ -736,12 +737,13 @@ func (g *lockTableGuardImpl) findNextLockAfter(notify bool) { // writer is typically waiting in an active state, i.e., the // lockTableGuardImpl.key refers to this lockState. However, there are // multiple reasons that can cause a writer to be an inactive waiter: -// - Breaking of reservations (see the comment on reservations below, in -// lockState) can cause a writer to be an inactive waiter. -// - A discovered lock causes the discoverer to become an inactive waiter -// (until it scans again). -// - A lock held by a finalized txn causes the first waiter to be an inactive -// waiter. +// - Breaking of reservations (see the comment on reservations below, in +// lockState) can cause a writer to be an inactive waiter. +// - A discovered lock causes the discoverer to become an inactive waiter +// (until it scans again). +// - A lock held by a finalized txn causes the first waiter to be an inactive +// waiter. +// // The first case above (breaking reservations) only occurs for transactional // requests, but the other cases can happen for both transactional and // non-transactional requests. @@ -1488,50 +1490,51 @@ func (l *lockState) clearLockHolder() { // It uses the finalizedTxnCache to decide that the caller does not need to // wait on a lock of a transaction that is already finalized. // -// - For unreplicated locks, this method will silently remove the lock and -// proceed as normal. -// - For replicated locks the behavior is more complicated since we need to -// resolve the intent. We desire: -// A. batching of intent resolution. -// B. minimize races where intent resolution is being performed by multiple -// requests. -// C. minimize races where the intent has not yet been resolved but has been -// removed from the lock table, thereby causing some other request to -// evaluate wastefully and discover the intent. +// - For unreplicated locks, this method will silently remove the lock and +// proceed as normal. // -// For A, the caller of tryActiveWait will accumulate the LockUpdates. For B, -// we only generate a LockUpdate here if this request is either a reader, or -// the first writer in the queue, i.e., it is only blocked by the lock -// holder. This prevents races between multiple writers in doing resolution -// but not between multiple readers and between readers and writers. We could -// be more conservative in only doing the intent resolution if the waiter was -// equivalent to a distinguished-waiter, but there it no guarantee that that -// distinguished waiter will do intent resolution in a timely manner (since -// it could block waiting on some other lock). Instead, the caller of -// tryActiveWait makes a best-effort to reduce racing (explained below). For -// C, the caller of tryActiveWait removes the lock from the in-memory -// data-structure only if the request does not need to wait anywhere, which -// means it will immediately proceed to intent resolution. 
Additionally, if -// the lock has already been removed, it suggests that some other request has -// already claimed intent resolution (or done it), so this request does not -// need to do the resolution. +// - For replicated locks the behavior is more complicated since we need to +// resolve the intent. We desire: +// A. batching of intent resolution. +// B. minimize races where intent resolution is being performed by multiple +// requests. +// C. minimize races where the intent has not yet been resolved but has been +// removed from the lock table, thereby causing some other request to +// evaluate wastefully and discover the intent. // -// Ideally, we would strengthen B and C -- a request should make a claim on -// intent resolution for a set of keys, and will either resolve the intent, -// or due to an error will return that claim so others can do so. A -// replicated lock (intent) would not be removed from the in-memory -// data-structure until it was actually gone. -// TODO(sumeer): do this cleaner solution for batched intent resolution. +// For A, the caller of tryActiveWait will accumulate the LockUpdates. For B, +// we only generate a LockUpdate here if this request is either a reader, or +// the first writer in the queue, i.e., it is only blocked by the lock +// holder. This prevents races between multiple writers in doing resolution +// but not between multiple readers and between readers and writers. We could +// be more conservative in only doing the intent resolution if the waiter was +// equivalent to a distinguished-waiter, but there it no guarantee that that +// distinguished waiter will do intent resolution in a timely manner (since +// it could block waiting on some other lock). Instead, the caller of +// tryActiveWait makes a best-effort to reduce racing (explained below). For +// C, the caller of tryActiveWait removes the lock from the in-memory +// data-structure only if the request does not need to wait anywhere, which +// means it will immediately proceed to intent resolution. Additionally, if +// the lock has already been removed, it suggests that some other request has +// already claimed intent resolution (or done it), so this request does not +// need to do the resolution. // -// In the future we'd like to augment the lockTable with an understanding of -// finalized but not yet resolved locks. These locks will allow conflicting -// transactions to proceed with evaluation without the need to first remove -// all traces of them via a round of replication. This is discussed in more -// detail in #41720. Specifically, see mention of "contention footprint" and -// COMMITTED_BUT_NOT_REMOVABLE. -// Also, resolving these locks/intents would proceed without latching, so we -// would not rely on MVCC scanning to add discovered locks to the lock table, -// since the discovered locks may be stale. +// Ideally, we would strengthen B and C -- a request should make a claim on +// intent resolution for a set of keys, and will either resolve the intent, +// or due to an error will return that claim so others can do so. A +// replicated lock (intent) would not be removed from the in-memory +// data-structure until it was actually gone. +// TODO(sumeer): do this cleaner solution for batched intent resolution. +// +// In the future we'd like to augment the lockTable with an understanding of +// finalized but not yet resolved locks. 
These locks will allow conflicting +// transactions to proceed with evaluation without the need to first remove +// all traces of them via a round of replication. This is discussed in more +// detail in #41720. Specifically, see mention of "contention footprint" and +// COMMITTED_BUT_NOT_REMOVABLE. +// Also, resolving these locks/intents would proceed without latching, so we +// would not rely on MVCC scanning to add discovered locks to the lock table, +// since the discovered locks may be stale. // // The return value is true iff it is actively waiting. // Acquires l.mu, g.mu. @@ -2716,9 +2719,10 @@ func (t *lockTableImpl) lockCountForTesting() int64 { } // tryClearLocks attempts to clear locks. -// - force=false: removes locks until it has removed numToClear locks. It does -// not remove locks marked as notRemovable. -// - force=true: removes all locks. +// - force=false: removes locks until it has removed numToClear locks. It does +// not remove locks marked as notRemovable. +// - force=true: removes all locks. +// // Waiters of removed locks are told to wait elsewhere or that they are done // waiting. func (t *lockTableImpl) tryClearLocks(force bool, numToClear int) { diff --git a/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go b/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go index f139eee40049..df8e40809045 100644 --- a/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go +++ b/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go @@ -32,17 +32,20 @@ const ( // cmp returns a value indicating the sort order relationship between // a and b. The comparison is performed lexicographically on -// (a.Key(), a.EndKey(), a.ID()) +// +// (a.Key(), a.EndKey(), a.ID()) +// // and -// (b.Key(), b.EndKey(), b.ID()) +// +// (b.Key(), b.EndKey(), b.ID()) +// // tuples. // // Given c = cmp(a, b): // -// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) -// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) -// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) -// +// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) +// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) +// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) func cmp(a, b *lockState) int { c := bytes.Compare(a.Key(), b.Key()) if c != 0 { @@ -325,21 +328,21 @@ func (n *node) find(item *lockState) (index int, found bool) { // // Before: // -// +-----------+ -// | x y z | -// +--/-/-\-\--+ +// +-----------+ +// | x y z | +// +--/-/-\-\--+ // // After: // -// +-----------+ -// | y | -// +----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// +----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | z | // +-----------+ +-----------+ -// func (n *node) split(i int) (*lockState, *node) { out := n.items[i] var next *node @@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *lockState { // is to minimize the number of key comparisons performed in total. The // algorithm operates based on the following two invariants maintained by // augmented interval btree: -// 1. all items are sorted in the btree based on their start key. -// 2. all btree nodes maintain the upper bound end key of all items -// in their subtree. +// 1. all items are sorted in the btree based on their start key. +// 2. all btree nodes maintain the upper bound end key of all items +// in their subtree. // // The scan algorithm starts in "unconstrained minimum" and "unconstrained // maximum" states. 
To enter a "constrained minimum" state, the scan must reach @@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *lockState { // // The scan algorithm works like a standard btree forward scan with the // following augmentations: -// 1. before tranversing the tree, the scan performs a binary search on the -// root node's items to determine a "soft" lower-bound constraint position -// and a "hard" upper-bound constraint position in the root's children. -// 2. when tranversing into a child node in the lower or upper bound constraint -// position, the constraint is refined by searching the child's items. -// 3. the initial traversal down the tree follows the left-most children -// whose upper bound end keys are equal to or greater than the start key -// of the search range. The children followed will be equal to or less -// than the soft lower bound constraint. -// 4. once the initial tranversal completes and the scan is in the left-most -// btree node whose upper bound overlaps the search range, key comparisons -// must be performed with each item in the tree. This is necessary because -// any of these items may have end keys that cause them to overlap with the -// search range. -// 5. once the scan reaches the lower bound constraint position (the first item -// with a start key equal to or greater than the search range's start key), -// it can begin scaning without performing key comparisons. This is allowed -// because all items from this point forward will have end keys that are -// greater than the search range's start key. -// 6. once the scan reaches the upper bound constraint position, it terminates. -// It does so because the item at this position is the first item with a -// start key larger than the search range's end key. +// 1. before tranversing the tree, the scan performs a binary search on the +// root node's items to determine a "soft" lower-bound constraint position +// and a "hard" upper-bound constraint position in the root's children. +// 2. when tranversing into a child node in the lower or upper bound constraint +// position, the constraint is refined by searching the child's items. +// 3. the initial traversal down the tree follows the left-most children +// whose upper bound end keys are equal to or greater than the start key +// of the search range. The children followed will be equal to or less +// than the soft lower bound constraint. +// 4. once the initial tranversal completes and the scan is in the left-most +// btree node whose upper bound overlaps the search range, key comparisons +// must be performed with each item in the tree. This is necessary because +// any of these items may have end keys that cause them to overlap with the +// search range. +// 5. once the scan reaches the lower bound constraint position (the first item +// with a start key equal to or greater than the search range's start key), +// it can begin scaning without performing key comparisons. This is allowed +// because all items from this point forward will have end keys that are +// greater than the search range's start key. +// 6. once the scan reaches the upper bound constraint position, it terminates. +// It does so because the item at this position is the first item with a +// start key larger than the search range's end key. type overlapScan struct { // The "soft" lower-bound constraint. 
constrMinN *node diff --git a/pkg/kv/kvserver/diskmap/disk_map.go b/pkg/kv/kvserver/diskmap/disk_map.go index 56defd4d39ac..73bf8b692164 100644 --- a/pkg/kv/kvserver/diskmap/disk_map.go +++ b/pkg/kv/kvserver/diskmap/disk_map.go @@ -26,16 +26,17 @@ type Factory interface { // SortedDiskMapIterator is a simple iterator used to iterate over keys and/or // values. // Example use of iterating over all keys: -// var i SortedDiskMapIterator -// for i.Rewind(); ; i.Next() { -// if ok, err := i.Valid(); err != nil { +// +// var i SortedDiskMapIterator +// for i.Rewind(); ; i.Next() { +// if ok, err := i.Valid(); err != nil { // // Handle error. -// } else if !ok { +// } else if !ok { // break -// } -// key := i.UnsafeKey() +// } +// key := i.UnsafeKey() // // Do something. -// } +// } type SortedDiskMapIterator interface { // SeekGE sets the iterator's position to the first key greater than or equal // to the provided key. diff --git a/pkg/kv/kvserver/gc/gc.go b/pkg/kv/kvserver/gc/gc.go index d016930fa506..1d9fef7657f6 100644 --- a/pkg/kv/kvserver/gc/gc.go +++ b/pkg/kv/kvserver/gc/gc.go @@ -697,6 +697,7 @@ func isGarbage( // transaction records, queue last processed timestamps, and range descriptors. // // - Transaction entries: +// // - For expired transactions , schedule the intents for // asynchronous resolution. The actual transaction spans are not // returned for GC in this pass, but are separately GC'ed after @@ -704,8 +705,8 @@ func isGarbage( // are no intents on the txn record, in which case it's returned for // immediate GC. // -// - Queue last processed times: cleanup any entries which don't match -// this range's start key. This can happen on range merges. +// - Queue last processed times: cleanup any entries which don't match +// this range's start key. This can happen on range merges. func processLocalKeyRange( ctx context.Context, snap storage.Reader, diff --git a/pkg/kv/kvserver/gc/gc_random_test.go b/pkg/kv/kvserver/gc/gc_random_test.go index 5a783c256d94..c2bd0fe485d0 100644 --- a/pkg/kv/kvserver/gc/gc_random_test.go +++ b/pkg/kv/kvserver/gc/gc_random_test.go @@ -334,10 +334,10 @@ type historyItem struct { // any dependency on how key splitting is done inside pebble. 
// Generated expected values are produces by simulating GC in a naive way where // each value is considered live if: -// - it is a value or tombstone and its timestamp is higher than gc threshold -// - it is a range tombstone and its timestamp is higher than gc threshold -// - it is a first value at or below gc threshold and there are no deletions -// between gc threshold and the value +// - it is a value or tombstone and its timestamp is higher than gc threshold +// - it is a range tombstone and its timestamp is higher than gc threshold +// - it is a first value at or below gc threshold and there are no deletions +// between gc threshold and the value func assertLiveData( t *testing.T, after, before storage.Reader, diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver.go b/pkg/kv/kvserver/intentresolver/intent_resolver.go index e8d116271250..f7032e486158 100644 --- a/pkg/kv/kvserver/intentresolver/intent_resolver.go +++ b/pkg/kv/kvserver/intentresolver/intent_resolver.go @@ -325,11 +325,14 @@ func (ir *IntentResolver) PushTransaction( // // Callers are involved with // a) conflict resolution for commands being executed at the Store with the -// client waiting, +// +// client waiting, +// // b) resolving intents encountered during inconsistent operations, and // c) resolving intents upon EndTxn which are not local to the given range. -// This is the only path in which the transaction is going to be in -// non-pending state and doesn't require a push. +// +// This is the only path in which the transaction is going to be in +// non-pending state and doesn't require a push. func (ir *IntentResolver) MaybePushTransactions( ctx context.Context, pushTxns map[uuid.UUID]*enginepb.TxnMeta, diff --git a/pkg/kv/kvserver/liveness/livenesspb/liveness.go b/pkg/kv/kvserver/liveness/livenesspb/liveness.go index 69f99874ca39..9f8c61c1a0f9 100644 --- a/pkg/kv/kvserver/liveness/livenesspb/liveness.go +++ b/pkg/kv/kvserver/liveness/livenesspb/liveness.go @@ -99,9 +99,9 @@ func (c MembershipStatus) String() string { // (which also includes decommissioning a decommissioned node) the valid state // transitions for Membership are as follows: // -// Decommissioning => Active -// Active => Decommissioning -// Decommissioning => Decommissioned +// Decommissioning => Active +// Active => Decommissioning +// Decommissioning => Decommissioned // // See diagram above the Membership type for more details. func ValidateTransition(old, new Liveness) error { diff --git a/pkg/kv/kvserver/loqrecovery/plan.go b/pkg/kv/kvserver/loqrecovery/plan.go index 3a753ca7359c..fc41db04cf7e 100644 --- a/pkg/kv/kvserver/loqrecovery/plan.go +++ b/pkg/kv/kvserver/loqrecovery/plan.go @@ -173,10 +173,14 @@ func PlanReplicas( // validateReplicaSets evaluates provided set of replicas and an optional // deadStoreIDs request and produces consistency info containing: // availableStores - all storeIDs for which info was collected, i.e. -// (barring operator error) the conclusive list of all -// remaining stores in the cluster. +// +// (barring operator error) the conclusive list of all +// remaining stores in the cluster. +// // missingStores - all dead stores (stores that are referenced by replicas, -// but not present in any of descriptors) +// +// but not present in any of descriptors) +// // If inconsistency is found e.g. no info was provided for a store but it is // not present in explicit deadStoreIDs list, error is returned. 
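The loqrecovery/plan.go and intent_resolver.go rewrites above show a rough edge of the new format: a hanging-indent continuation line that gofmt cannot attach to a recognized list item becomes a preformatted block, which is how fragments like "(barring operator error) ..." end up isolated between blank comment lines. Below is a hedged sketch of a bullet layout that avoids this effect; the function name and wording are illustrative, not from the patch.

```go
package docfmtexample

// validateInputs is a made-up stand-in showing a term/description list
// written as bullets, which keeps each wrapped description attached to
// its item rather than having the continuation re-indented into a
// preformatted block (the effect visible in the plan.go hunk above):
//   - availableStores: all store IDs for which info was collected,
//     i.e. (barring operator error) the conclusive list of remaining
//     stores in the cluster.
//   - missingStores: stores referenced by replicas but absent from
//     every collected descriptor.
func validateInputs() {}
```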
func validateReplicaSets( diff --git a/pkg/kv/kvserver/merge_queue.go b/pkg/kv/kvserver/merge_queue.go index 4c79cf5005aa..387d0b8264e6 100644 --- a/pkg/kv/kvserver/merge_queue.go +++ b/pkg/kv/kvserver/merge_queue.go @@ -63,8 +63,8 @@ var MergeQueueInterval = settings.RegisterDurationSetting( // Note that the merge queue is not capable of initiating all possible merges. // Consider the example below: // -// /Table/51/1 /Table/51/2 /Table/52 -// 32MB 0MB 32MB +// /Table/51/1 /Table/51/2 /Table/52 +// 32MB 0MB 32MB // // The range beginning at /Table/51/2 is empty and would, ideally, be merged // away. The range to its left, /Table/51/1, will not propose a merge because it diff --git a/pkg/kv/kvserver/mvcc_gc_queue.go b/pkg/kv/kvserver/mvcc_gc_queue.go index 61d3b4810f78..1d347ae61420 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue.go +++ b/pkg/kv/kvserver/mvcc_gc_queue.go @@ -107,12 +107,12 @@ func largeAbortSpan(ms enginepb.MVCCStats) bool { // entirety using the MVCC versions iterator. The mvcc gc queue manages // the following tasks: // -// - GC of version data via TTL expiration (and more complex schemes -// as implemented going forward). -// - Resolve extant write intents (pushing their transactions). -// - GC of old transaction and AbortSpan entries. This should include -// most committed and aborted entries almost immediately and, after a -// threshold on inactivity, all others. +// - GC of version data via TTL expiration (and more complex schemes +// as implemented going forward). +// - Resolve extant write intents (pushing their transactions). +// - GC of old transaction and AbortSpan entries. This should include +// most committed and aborted entries almost immediately and, after a +// threshold on inactivity, all others. // // The shouldQueue function combines the need for the above tasks into a // single priority. If any task is overdue, shouldQueue returns true. @@ -299,25 +299,26 @@ func makeMVCCGCQueueScore( // from the right side of the frame), at least a surface area of `X` has been // removed. // -// x=-ttl GCBytes=1+4 -// | 3 (age) -// | +-------+ -// | | keep | 1 (bytes) -// | +-------+ -// +-----------------------+ -// | | -// | remove | 3 (bytes) -// | | -// +-----------------------+ -// | 7 (age) +// x=-ttl GCBytes=1+4 +// | 3 (age) +// | +-------+ +// | | keep | 1 (bytes) +// | +-------+ +// +-----------------------+ +// | | +// | remove | 3 (bytes) +// | | +// +-----------------------+ +// | 7 (age) // -// This is true because +// # This is true because // // deletable area = total area - nondeletable area -// = X + ttl*GCBytes - nondeletable area -// >= X + ttl*GCBytes - ttl*(bytes in nondeletable area) -// = X + ttl*(GCBytes - bytes in nondeletable area) -// >= X. +// +// = X + ttl*GCBytes - nondeletable area +// >= X + ttl*GCBytes - ttl*(bytes in nondeletable area) +// = X + ttl*(GCBytes - bytes in nondeletable area) +// >= X. // // Or, in other words, you can only hope to put `ttl*GCBytes` of area in the // "safe" rectangle. 
Once you've done that, everything else you put is going to @@ -564,16 +565,16 @@ func (r *replicaGCer) GC( // * obtaining the transaction for a AbortSpan entry requires a Push // // The following order is taken below: -// 1) collect all intents with sufficiently old txn record -// 2) collect these intents' transactions -// 3) scan the transaction table, collecting abandoned or completed txns -// 4) push all of these transactions (possibly recreating entries) -// 5) resolve all intents (unless the txn is not yet finalized), which -// will recreate AbortSpan entries (but with the txn timestamp; i.e. -// likely GC'able) -// 6) scan the AbortSpan table for old entries -// 7) push these transactions (again, recreating txn entries). -// 8) send a GCRequest. +// 1. collect all intents with sufficiently old txn record +// 2. collect these intents' transactions +// 3. scan the transaction table, collecting abandoned or completed txns +// 4. push all of these transactions (possibly recreating entries) +// 5. resolve all intents (unless the txn is not yet finalized), which +// will recreate AbortSpan entries (but with the txn timestamp; i.e. +// likely GC'able) +// 6. scan the AbortSpan table for old entries +// 7. push these transactions (again, recreating txn entries). +// 8. send a GCRequest. func (mgcq *mvccGCQueue) process( ctx context.Context, repl *Replica, _ spanconfig.StoreReader, ) (processed bool, err error) { diff --git a/pkg/kv/kvserver/raft_log_queue.go b/pkg/kv/kvserver/raft_log_queue.go index 036e1e9d9359..c721e2b2ec37 100644 --- a/pkg/kv/kvserver/raft_log_queue.go +++ b/pkg/kv/kvserver/raft_log_queue.go @@ -99,15 +99,17 @@ import ( // cycle of everyone running with the default value of true. It only exists as // a safety switch in case the new behavior causes unanticipated issues. // Current plan: -// - v22.1: Has the setting. Expectation is that no one changes to false. -// - v22.2: The code behavior is hard-coded to true, in that the setting has -// no effect (we can also delete a bunch of legacy code). +// - v22.1: Has the setting. Expectation is that no one changes to false. +// - v22.2: The code behavior is hard-coded to true, in that the setting has +// no effect (we can also delete a bunch of legacy code). +// // Mixed version clusters: -// - v21.2 and v22.1: Will behave as strongly coupled since the cluster -// version serves as an additional gate. -// - v22.1 and v22.2: If the setting has been changed to false the v22.1 nodes -// will do strongly coupled truncation and the v22.2 will do loosely -// coupled. This co-existence is correct. +// - v21.2 and v22.1: Will behave as strongly coupled since the cluster +// version serves as an additional gate. +// - v22.1 and v22.2: If the setting has been changed to false the v22.1 nodes +// will do strongly coupled truncation and the v22.2 will do loosely +// coupled. This co-existence is correct. +// // NB: The above comment is incorrect about the default value being true. Due // to https://github.com/cockroachdb/cockroach/issues/78412 we have changed // the default to false for v22.1. @@ -218,15 +220,15 @@ func newRaftLogQueue(store *Store, db *kv.DB) *raftLogQueue { // // Unfortunately, the size tracking is not very robust as it suffers from two // limitations at the time of writing: -// 1. it may undercount as it is in-memory and incremented only as proposals -// are handled; that is, a freshly started node will believe its Raft log to be -// zero-sized independent of its actual size, and -// 2. 
the addition and corresponding subtraction happen in very different places -// and are difficult to keep bug-free, meaning that there is low confidence that -// we maintain the delta in a completely accurate manner over time. One example -// of potential errors are sideloaded proposals, for which the subtraction needs -// to load the size of the file on-disk (i.e. supplied by the fs), whereas -// the addition uses the in-memory representation of the file. +// 1. it may undercount as it is in-memory and incremented only as proposals +// are handled; that is, a freshly started node will believe its Raft log to be +// zero-sized independent of its actual size, and +// 2. the addition and corresponding subtraction happen in very different places +// and are difficult to keep bug-free, meaning that there is low confidence that +// we maintain the delta in a completely accurate manner over time. One example +// of potential errors are sideloaded proposals, for which the subtraction needs +// to load the size of the file on-disk (i.e. supplied by the fs), whereas +// the addition uses the in-memory representation of the file. // // Ideally, a Raft log that grows large for whichever reason (for instance the // queue being stuck on another replica) wouldn't be more than a nuisance on @@ -351,15 +353,15 @@ const ( // No assumption should be made about the relationship between // RaftStatus.Commit, FirstIndex, LastIndex. This is because: -// - In some cases they are not updated or read atomically. -// - FirstIndex is a potentially future first index, after the pending -// truncations have been applied. Currently, pending truncations are being -// proposed through raft, so one can be sure that these pending truncations -// do not refer to entries that are not already in the log. However, this -// situation may change in the future. In general, we should not make an -// assumption on what is in the local raft log based solely on FirstIndex, -// and should be based on whether [FirstIndex,LastIndex] is a non-empty -// interval. +// - In some cases they are not updated or read atomically. +// - FirstIndex is a potentially future first index, after the pending +// truncations have been applied. Currently, pending truncations are being +// proposed through raft, so one can be sure that these pending truncations +// do not refer to entries that are not already in the log. However, this +// situation may change in the future. In general, we should not make an +// assumption on what is in the local raft log based solely on FirstIndex, +// and should be based on whether [FirstIndex,LastIndex] is a non-empty +// interval. type truncateDecisionInput struct { RaftStatus raft.Status LogSize, MaxLogSize int64 diff --git a/pkg/kv/kvserver/rangefeed/catchup_scan.go b/pkg/kv/kvserver/rangefeed/catchup_scan.go index 8b604c2a0aed..242a03537172 100644 --- a/pkg/kv/kvserver/rangefeed/catchup_scan.go +++ b/pkg/kv/kvserver/rangefeed/catchup_scan.go @@ -27,10 +27,10 @@ import ( // (often) more efficient MVCCIncrementalIterator. When the caller wants to // iterate to see older versions of a key, the desire of the caller needs to // be expressed using one of two methods: -// - Next: when it wants to omit any versions that are not within the time -// bounds. -// - NextIgnoringTime: when it wants to see the next older version even if it -// is not within the time bounds. +// - Next: when it wants to omit any versions that are not within the time +// bounds. 
+// - NextIgnoringTime: when it wants to see the next older version even if it +// is not within the time bounds. type simpleCatchupIter interface { storage.SimpleMVCCIterator NextIgnoringTime() diff --git a/pkg/kv/kvserver/rangefeed/catchup_scan_test.go b/pkg/kv/kvserver/rangefeed/catchup_scan_test.go index 9a6b3b596877..95acf1917c7c 100644 --- a/pkg/kv/kvserver/rangefeed/catchup_scan_test.go +++ b/pkg/kv/kvserver/rangefeed/catchup_scan_test.go @@ -34,10 +34,10 @@ import ( // // To invoke and compare on the numRangeKeys dimension: // -// go test ./pkg/kv/kvserver/rangefeed/ -run - -count 10 -bench BenchmarkCatchUpScan 2>&1 | tee bench.txt -// for flavor in numRangeKeys=0 numRangeKeys=1 numRangeKeys=100; do grep -E "${flavor}[^0-9]+" bench.txt | sed -E "s/${flavor}+/X/" > $flavor.txt; done -// benchstat numRangeKeys\={0,1}.txt -// benchstat numRangeKeys\={0,100}.txt +// go test ./pkg/kv/kvserver/rangefeed/ -run - -count 10 -bench BenchmarkCatchUpScan 2>&1 | tee bench.txt +// for flavor in numRangeKeys=0 numRangeKeys=1 numRangeKeys=100; do grep -E "${flavor}[^0-9]+" bench.txt | sed -E "s/${flavor}+/X/" > $flavor.txt; done +// benchstat numRangeKeys\={0,1}.txt +// benchstat numRangeKeys\={0,100}.txt func TestCatchupScan(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvserver/rangefeed/processor.go b/pkg/kv/kvserver/rangefeed/processor.go index 3bd0b58bf7c7..46aa41b2722b 100644 --- a/pkg/kv/kvserver/rangefeed/processor.go +++ b/pkg/kv/kvserver/rangefeed/processor.go @@ -107,9 +107,9 @@ func (sc *Config) SetDefaults() { // Processor manages a set of rangefeed registrations and handles the routing of // logical updates to these registrations. While routing logical updates to // rangefeed registrations, the processor performs two important tasks: -// 1. it translates logical updates into rangefeed events. -// 2. it transforms a range-level closed timestamp to a rangefeed-level resolved -// timestamp. +// 1. it translates logical updates into rangefeed events. +// 2. it transforms a range-level closed timestamp to a rangefeed-level resolved +// timestamp. type Processor struct { Config reg registry diff --git a/pkg/kv/kvserver/rangefeed/resolved_timestamp.go b/pkg/kv/kvserver/rangefeed/resolved_timestamp.go index 4b7e6dd6f743..f1f9e8d57a8f 100644 --- a/pkg/kv/kvserver/rangefeed/resolved_timestamp.go +++ b/pkg/kv/kvserver/rangefeed/resolved_timestamp.go @@ -275,10 +275,11 @@ func (rts *resolvedTimestamp) assertOpAboveRTS(op enginepb.MVCCLogicalOp, opTS h // that may at some point in the future result in a RangeFeedValue publication. // Based on this definition, there are three possible states that an extent // intent can be in while fitting the requirement to be an "unresolved intent": -// 1. part of a PENDING transaction -// 2. part of a STAGING transaction that has not been explicitly committed yet -// 3. part of a COMMITTED transaction but not yet resolved due to the asynchronous -// nature of intent resolution +// 1. part of a PENDING transaction +// 2. part of a STAGING transaction that has not been explicitly committed yet +// 3. part of a COMMITTED transaction but not yet resolved due to the asynchronous +// nature of intent resolution +// // Notably, this means that an intent that exists but that is known to be part // of an ABORTED transaction is not considered "unresolved", even if it has yet // to be cleaned up. 
In the context of rangefeeds, the intent's fate is resolved @@ -287,11 +288,11 @@ func (rts *resolvedTimestamp) assertOpAboveRTS(op enginepb.MVCCLogicalOp, opTS h // Defining unresolved intents in this way presents two paths for an unresolved // intent to become resolved (and thus decrement the unresolvedTxn's ref count). // An unresolved intent can become resolved if: -// 1. it is COMMITTED or ABORTED through the traditional intent resolution -// process. -// 2. it's transaction is observed to be ABORTED, meaning that it is by -// definition resolved even if it has yet to be cleaned up by the intent -// resolution process. +// 1. it is COMMITTED or ABORTED through the traditional intent resolution +// process. +// 2. it's transaction is observed to be ABORTED, meaning that it is by +// definition resolved even if it has yet to be cleaned up by the intent +// resolution process. // // An unresolvedTxn is a transaction that has one or more unresolved intents on // a given range. The structure itself maintains metadata about the transaction diff --git a/pkg/kv/kvserver/rangefeed/task.go b/pkg/kv/kvserver/rangefeed/task.go index 6a98068fe9ae..65258a63af3b 100644 --- a/pkg/kv/kvserver/rangefeed/task.go +++ b/pkg/kv/kvserver/rangefeed/task.go @@ -37,7 +37,6 @@ type runnable interface { // the Processor was started and hooked up to a stream of logical operations. // The Processor can initialize its resolvedTimestamp once the scan completes // because it knows it is now tracking all intents in its key range. -// type initResolvedTSScan struct { p *Processor is IntentScanner @@ -89,8 +88,8 @@ type IntentScanner interface { // // EngineIterator Contract: // -// - The EngineIterator must have an UpperBound set. -// - The range must be using separated intents. +// - The EngineIterator must have an UpperBound set. +// - The range must be using separated intents. type SeparatedIntentScanner struct { iter storage.EngineIterator } @@ -151,12 +150,11 @@ func (s *SeparatedIntentScanner) Close() { s.iter.Close() } // // MVCCIterator Contract: // -// The provided MVCCIterator must observe all intents in the Processor's keyspan. -// An important implication of this is that if the iterator is a -// TimeBoundIterator, its MinTimestamp cannot be above the keyspan's largest -// known resolved timestamp, if one has ever been recorded. If one has never -// been recorded, the TimeBoundIterator cannot have any lower bound. -// +// The provided MVCCIterator must observe all intents in the Processor's keyspan. +// An important implication of this is that if the iterator is a +// TimeBoundIterator, its MinTimestamp cannot be above the keyspan's largest +// known resolved timestamp, if one has ever been recorded. If one has never +// been recorded, the TimeBoundIterator cannot have any lower bound. type LegacyIntentScanner struct { iter storage.SimpleMVCCIterator } @@ -225,20 +223,20 @@ type TxnPusher interface { // txnPushAttempt pushes all old transactions that have unresolved intents on // the range which are blocking the resolved timestamp from moving forward. It // does so in two steps. -// 1. it pushes all old transactions to the current timestamp and gathers -// up the transactions' authoritative transaction records. -// 2. for each transaction that is pushed, it checks the transaction's current -// status and reacts accordingly: -// - PENDING: inform the Processor that the transaction's timestamp has -// increased so that the transaction's intents no longer need -// to block the resolved timestamp. 
Even though the intents -// may still be at an older timestamp, we know that they can't -// commit at that timestamp. -// - COMMITTED: launch async processes to resolve the transaction's intents -// so they will be resolved sometime soon and unblock the -// resolved timestamp. -// - ABORTED: inform the Processor to stop caring about the transaction. -// It will never commit and its intents can be safely ignored. +// 1. it pushes all old transactions to the current timestamp and gathers +// up the transactions' authoritative transaction records. +// 2. for each transaction that is pushed, it checks the transaction's current +// status and reacts accordingly: +// - PENDING: inform the Processor that the transaction's timestamp has +// increased so that the transaction's intents no longer need +// to block the resolved timestamp. Even though the intents +// may still be at an older timestamp, we know that they can't +// commit at that timestamp. +// - COMMITTED: launch async processes to resolve the transaction's intents +// so they will be resolved sometime soon and unblock the +// resolved timestamp. +// - ABORTED: inform the Processor to stop caring about the transaction. +// It will never commit and its intents can be safely ignored. type txnPushAttempt struct { p *Processor txns []enginepb.TxnMeta diff --git a/pkg/kv/kvserver/replica_application_state_machine.go b/pkg/kv/kvserver/replica_application_state_machine.go index a4ad32e91e4e..266aede2d050 100644 --- a/pkg/kv/kvserver/replica_application_state_machine.go +++ b/pkg/kv/kvserver/replica_application_state_machine.go @@ -200,7 +200,7 @@ var noopOnProbeCommandErr = roachpb.NewErrorf("no-op on ProbeRequest") // 1. verify that the command was proposed under the current lease. This is // determined using the proposal's ProposerLeaseSequence. // 1.1. lease requests instead check for specifying the current lease -// as the lease they follow. +// as the lease they follow. // 1.2. ProbeRequest instead always fail this step with noopOnProbeCommandErr. // 2. verify that the command hasn't been re-ordered with other commands that // were proposed after it and which already applied. This is determined diff --git a/pkg/kv/kvserver/replica_backpressure.go b/pkg/kv/kvserver/replica_backpressure.go index 8ac038879425..fd1aa95e63cc 100644 --- a/pkg/kv/kvserver/replica_backpressure.go +++ b/pkg/kv/kvserver/replica_backpressure.go @@ -53,7 +53,7 @@ var backpressureRangeSizeMultiplier = settings.RegisterFloatSetting( // // We additionally mitigate this situation further by doing the following: // -// 1) We store in-memory on each replica the largest zone configuration range +// 1. We store in-memory on each replica the largest zone configuration range // size (largestPreviousMaxRangeBytes) we've seen and we do not backpressure // if the current range size is less than that. That value is cleared when // a range splits or runs GC such that the range size becomes smaller than @@ -61,10 +61,9 @@ var backpressureRangeSizeMultiplier = settings.RegisterFloatSetting( // a node may restart before the splitting has concluded, leaving the // cluster in a state of backpressure. // -// 2) We assign a higher priority in the snapshot queue to ranges which are +// 2. We assign a higher priority in the snapshot queue to ranges which are // currently backpressuring than ranges which are larger but are not // applying backpressure. 
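As a concrete rendering of the two mitigations above, here is a minimal, self-contained Go sketch under assumed names; it is not the kvserver implementation, and mitigation (2), the snapshot-queue prioritization, is not modeled.

package main

import "fmt"

// rangeSizeState is a stand-in for the per-replica fields referenced above.
type rangeSizeState struct {
	sizeBytes                    int64 // current logical size of the range
	maxBytes                     int64 // zone-config maximum range size
	largestPreviousMaxRangeBytes int64 // mitigation (1): largest maximum seen before a shrink
}

// shouldBackpressureWrites sketches the decision described above.
func shouldBackpressureWrites(s rangeSizeState, multiplier float64, byteTolerance int64) bool {
	if multiplier <= 0 {
		return false // treat a zero multiplier as "backpressure disabled" in this sketch
	}
	if float64(s.sizeBytes) <= multiplier*float64(s.maxBytes) {
		return false // the range is not (yet) too large
	}
	// Mitigation (1): if the range is still within the largest maximum we have
	// ever seen (plus some slack), let splitting/GC catch up instead of
	// blocking writers.
	if s.largestPreviousMaxRangeBytes > 0 &&
		s.sizeBytes < s.largestPreviousMaxRangeBytes+byteTolerance {
		return false
	}
	return true
}

func main() {
	s := rangeSizeState{
		sizeBytes:                    1200 << 20, // 1.2 GiB, seen after a zone-config shrink
		maxBytes:                     512 << 20,
		largestPreviousMaxRangeBytes: 2048 << 20,
	}
	fmt.Println(shouldBackpressureWrites(s, 2.0, 32<<20)) // false: still under the old maximum plus tolerance
}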
-// var backpressureByteTolerance = settings.RegisterByteSizeSetting( settings.TenantWritable, "kv.range.backpressure_byte_tolerance", diff --git a/pkg/kv/kvserver/replica_closedts_test.go b/pkg/kv/kvserver/replica_closedts_test.go index c6b96c3867fc..7713692f00f8 100644 --- a/pkg/kv/kvserver/replica_closedts_test.go +++ b/pkg/kv/kvserver/replica_closedts_test.go @@ -433,31 +433,31 @@ func TestBumpSideTransportClosed(t *testing.T) { // // The tricky scenario tested is the following: // -// 1. A lease held by rep1 is getting close to its expiration. -// 2. Rep1 begins the process of transferring its lease to rep2 with a start -// time of 100. -// 3. The transfer goes slowly. From the perspective of rep2, the original lease -// expires, so it begins acquiring a new lease with a start time of 200. The -// lease acquisition is slow to propose. -// 4. The lease transfer finally applies. Rep2 is the new leaseholder and bumps -// its tscache to 100. -// 5. Two writes start evaluating on rep2 under the new lease. They bump their -// write timestamp to 100,1. -// 6. Rep2's lease acquisition from step 3 is proposed. Here's where the -// regression that this test is protecting against comes in: if rep2 was to -// mechanically bump its assignedClosedTimestamp to 200, that'd be incorrect -// because there are in-flight writes at 100. If those writes get proposed -// after the lease acquisition request, the second of them to get proposed -// would violate the closed time carried by the first (see below). -// 7. The lease acquisition gets rejected below Raft because the previous lease -// it asserts doesn't correspond to the lease that it applies under. -// 8. The two writes from step 5 are proposed. The closed timestamp that they -// each carry has a lower bound of rep2.assignedClosedTimestmap. If this was -// 200, then the second one would violate the closed timestamp carried by the -// first one - the first one says that 200 is closed, but then the second -// tries to write at 100. Note that the first write is OK writing at 100 even -// though it carries a closed timestamp of 200 - the closed timestamp carried -// by a command only binds future commands. +// 1. A lease held by rep1 is getting close to its expiration. +// 2. Rep1 begins the process of transferring its lease to rep2 with a start +// time of 100. +// 3. The transfer goes slowly. From the perspective of rep2, the original lease +// expires, so it begins acquiring a new lease with a start time of 200. The +// lease acquisition is slow to propose. +// 4. The lease transfer finally applies. Rep2 is the new leaseholder and bumps +// its tscache to 100. +// 5. Two writes start evaluating on rep2 under the new lease. They bump their +// write timestamp to 100,1. +// 6. Rep2's lease acquisition from step 3 is proposed. Here's where the +// regression that this test is protecting against comes in: if rep2 was to +// mechanically bump its assignedClosedTimestamp to 200, that'd be incorrect +// because there are in-flight writes at 100. If those writes get proposed +// after the lease acquisition request, the second of them to get proposed +// would violate the closed time carried by the first (see below). +// 7. The lease acquisition gets rejected below Raft because the previous lease +// it asserts doesn't correspond to the lease that it applies under. +// 8. The two writes from step 5 are proposed. The closed timestamp that they +// each carry has a lower bound of rep2.assignedClosedTimestmap. 
If this was +// 200, then the second one would violate the closed timestamp carried by the +// first one - the first one says that 200 is closed, but then the second +// tries to write at 100. Note that the first write is OK writing at 100 even +// though it carries a closed timestamp of 200 - the closed timestamp carried +// by a command only binds future commands. // // The test simulates the scenario and verifies that we don't crash with a // closed timestamp violation assertion. We avoid the violation because, in step diff --git a/pkg/kv/kvserver/replica_command.go b/pkg/kv/kvserver/replica_command.go index eb7b06f3620f..a40758f343eb 100644 --- a/pkg/kv/kvserver/replica_command.go +++ b/pkg/kv/kvserver/replica_command.go @@ -890,52 +890,52 @@ func waitForReplicasInit( // // In general, ChangeReplicas will carry out the following steps. // -// 1. Run a distributed transaction that adds all new replicas as learner replicas. -// Learner replicas receive the log, but do not have voting rights. They are -// used to catch up these new replicas before turning them into voters, which -// is important for the continued availability of the range throughout the -// replication change. Learners are added (and removed) one by one due to a -// technicality (see https://github.com/cockroachdb/cockroach/pull/40268). +// 1. Run a distributed transaction that adds all new replicas as learner replicas. +// Learner replicas receive the log, but do not have voting rights. They are +// used to catch up these new replicas before turning them into voters, which +// is important for the continued availability of the range throughout the +// replication change. Learners are added (and removed) one by one due to a +// technicality (see https://github.com/cockroachdb/cockroach/pull/40268). // -// The distributed transaction updates both copies of the range descriptor -// (the one on the range and that in the meta ranges) to that effect, and -// commits with a special trigger instructing Raft (via ProposeConfChange) to -// tie a corresponding replication configuration change which goes into -// effect (on each replica) when the transaction commit is applied to the -// state. Applying the command also updates each replica's local view of -// the state to reflect the new descriptor. +// The distributed transaction updates both copies of the range descriptor +// (the one on the range and that in the meta ranges) to that effect, and +// commits with a special trigger instructing Raft (via ProposeConfChange) to +// tie a corresponding replication configuration change which goes into +// effect (on each replica) when the transaction commit is applied to the +// state. Applying the command also updates each replica's local view of +// the state to reflect the new descriptor. // -// If no replicas are being added, this first step is elided. If non-voting -// replicas (which are also learners in etcd/raft) are being added, then this -// step is all we need. The rest of the steps only apply if voter replicas -// are being added. +// If no replicas are being added, this first step is elided. If non-voting +// replicas (which are also learners in etcd/raft) are being added, then this +// step is all we need. The rest of the steps only apply if voter replicas +// are being added. // -// 2. Send Raft snapshots to all learner replicas. 
This would happen -// automatically by the existing recovery mechanisms (raft snapshot queue), but -// it is done explicitly as a convenient way to ensure learners are caught up -// before the next step is entered. (We ensure that work is not duplicated -// between the snapshot queue and the explicit snapshot via the -// snapshotLogTruncationConstraints map). Snapshots are subject to both -// bandwidth rate limiting and throttling. +// 2. Send Raft snapshots to all learner replicas. This would happen +// automatically by the existing recovery mechanisms (raft snapshot queue), but +// it is done explicitly as a convenient way to ensure learners are caught up +// before the next step is entered. (We ensure that work is not duplicated +// between the snapshot queue and the explicit snapshot via the +// snapshotLogTruncationConstraints map). Snapshots are subject to both +// bandwidth rate limiting and throttling. // -// If no replicas are being added, this step is similarly elided. +// If no replicas are being added, this step is similarly elided. // -// 3. Carry out a distributed transaction similar to that which added the -// learner replicas, except this time it (atomically) changes all learners to -// voters and removes any replicas for which this was requested; voters are -// demoted before actually being removed to avoid bug in etcd/raft: -// See https://github.com/cockroachdb/cockroach/pull/40268. +// 3. Carry out a distributed transaction similar to that which added the +// learner replicas, except this time it (atomically) changes all learners to +// voters and removes any replicas for which this was requested; voters are +// demoted before actually being removed to avoid bug in etcd/raft: +// See https://github.com/cockroachdb/cockroach/pull/40268. // -// If only one replica is being added, raft can chose the simple -// configuration change protocol; otherwise it has to use joint consensus. In -// this latter mechanism, a first configuration change is made which results -// in a configuration ("joint configuration") in which a quorum of both the -// old replicas and the new replica sets is required for decision making. -// Transitioning into this joint configuration, the RangeDescriptor (which is -// the source of truth of the replication configuration) is updated with -// corresponding replicas of type VOTER_INCOMING and VOTER_DEMOTING. -// Immediately after committing this change, a second transition updates the -// descriptor with and activates the final configuration. +// If only one replica is being added, raft can chose the simple +// configuration change protocol; otherwise it has to use joint consensus. In +// this latter mechanism, a first configuration change is made which results +// in a configuration ("joint configuration") in which a quorum of both the +// old replicas and the new replica sets is required for decision making. +// Transitioning into this joint configuration, the RangeDescriptor (which is +// the source of truth of the replication configuration) is updated with +// corresponding replicas of type VOTER_INCOMING and VOTER_DEMOTING. +// Immediately after committing this change, a second transition updates the +// descriptor with and activates the final configuration. 
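To make the three steps above concrete, the following self-contained Go sketch (toy types only; the joint-consensus and demotion details are elided, and this is not the actual ChangeReplicas code) walks a descriptor through add-learners, snapshot, and the atomic promote/remove transition, mirroring the s1-s5 example discussed next.

package main

import "fmt"

type replicaType int

const (
	voterFull replicaType = iota
	learner
)

type replica struct {
	storeID int
	typ     replicaType
}

type rangeDesc struct{ replicas []replica }

// addLearners models step 1: new replicas join as non-voting learners.
func addLearners(d rangeDesc, stores ...int) rangeDesc {
	for _, s := range stores {
		d.replicas = append(d.replicas, replica{storeID: s, typ: learner})
	}
	return d
}

// sendSnapshots models step 2: each learner is explicitly caught up.
func sendSnapshots(d rangeDesc) {
	for _, r := range d.replicas {
		if r.typ == learner {
			fmt.Printf("sending snapshot to learner on s%d\n", r.storeID)
		}
	}
}

// promoteAndRemove models step 3: in one descriptor update, learners become
// voters and the requested voters are removed.
func promoteAndRemove(d rangeDesc, remove map[int]bool) rangeDesc {
	var out []replica
	for _, r := range d.replicas {
		if remove[r.storeID] {
			continue
		}
		r.typ = voterFull
		out = append(out, r)
	}
	d.replicas = out
	return d
}

func main() {
	d := rangeDesc{replicas: []replica{{1, voterFull}, {2, voterFull}, {3, voterFull}}}
	d = addLearners(d, 4, 5)
	sendSnapshots(d)
	d = promoteAndRemove(d, map[int]bool{1: true, 2: true})
	fmt.Println(d) // replicas on s3, s4, s5, all voters
}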
// // Concretely, if the initial members of the range are s1/1, s2/2, and s3/3, and // an atomic membership change were to add s4/4 and s5/5 while removing s1/1 and @@ -1860,9 +1860,9 @@ func (r *Replica) lockLearnerSnapshot( // The atomic membership change is carried out chiefly via the construction of a // suitable ChangeReplicasTrigger, see prepareChangeReplicasTrigger for details. // -// When adding/removing only a single voter, joint consensus is not used. -// Notably, demotions must always use joint consensus, even if only a single -// voter is being demoted, due to a (liftable) limitation in etcd/raft. +// When adding/removing only a single voter, joint consensus is not used. +// Notably, demotions must always use joint consensus, even if only a single +// voter is being demoted, due to a (liftable) limitation in etcd/raft. // // [raft-bug]: https://github.com/etcd-io/etcd/issues/11284 func (r *Replica) execReplicationChangesForVoters( @@ -3084,12 +3084,12 @@ func (r *Replica) AdminRelocateRange( // the desired state. In an "atomic replication changes" world, this is // conceptually easy: change from the old set of replicas to the new one. But // there are two reasons that complicate this: -// 1. we can't remove the leaseholder, so if we ultimately want to do that -// the lease has to be moved first. If we start out with *only* the -// leaseholder, we will have to add a replica first. -// 2. this code is rewritten late in the cycle and it is both safer and -// closer to its previous incarnation to never issue atomic changes -// other than simple swaps. +// 1. we can't remove the leaseholder, so if we ultimately want to do that +// the lease has to be moved first. If we start out with *only* the +// leaseholder, we will have to add a replica first. +// 2. this code is rewritten late in the cycle and it is both safer and +// closer to its previous incarnation to never issue atomic changes +// other than simple swaps. // // The loop below repeatedly calls relocateOne, which gives us either // one or two ops that move the range towards the desired replication state. If diff --git a/pkg/kv/kvserver/replica_consistency.go b/pkg/kv/kvserver/replica_consistency.go index ee9c5ed69a76..5b9d0f3c4004 100644 --- a/pkg/kv/kvserver/replica_consistency.go +++ b/pkg/kv/kvserver/replica_consistency.go @@ -55,11 +55,11 @@ const replicaChecksumGCInterval = time.Hour // fatalOnStatsMismatch, if true, turns stats mismatches into fatal errors. A // stats mismatch is the event in which -// - the consistency checker finds that all replicas are consistent -// (i.e. byte-by-byte identical) -// - the (identical) stats tracked in them do not correspond to a recomputation -// via the data, i.e. the stats were incorrect -// - ContainsEstimates==false, i.e. the stats claimed they were correct. +// - the consistency checker finds that all replicas are consistent +// (i.e. byte-by-byte identical) +// - the (identical) stats tracked in them do not correspond to a recomputation +// via the data, i.e. the stats were incorrect +// - ContainsEstimates==false, i.e. the stats claimed they were correct. // // Before issuing the fatal error, the cluster bootstrap version is verified. 
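The three conditions above can be summarized as a single predicate. The following is a minimal, self-contained sketch under assumed names (a stand-in stats struct rather than enginepb.MVCCStats, with a single representative field); it is illustrative, not the consistency checker's actual logic.

package main

import "fmt"

type stats struct {
	liveBytes         int64
	containsEstimates bool
}

// isStatsMismatch reports a mismatch when all replicas are byte-for-byte
// consistent, the stored stats disagree with a recomputation from the data,
// and the stats claimed to be exact (no estimates).
func isStatsMismatch(replicasConsistent bool, stored, recomputed stats) bool {
	return replicasConsistent &&
		stored != recomputed &&
		!stored.containsEstimates
}

func main() {
	stored := stats{liveBytes: 100, containsEstimates: false}
	recomputed := stats{liveBytes: 120, containsEstimates: false}
	fmt.Println(isStatsMismatch(true, stored, recomputed)) // true: this is the case treated as fatal
}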
// We know that old versions of CockroachDB sometimes violated this invariant, diff --git a/pkg/kv/kvserver/replica_init.go b/pkg/kv/kvserver/replica_init.go index 18555e0761b1..e6b8c4e48591 100644 --- a/pkg/kv/kvserver/replica_init.go +++ b/pkg/kv/kvserver/replica_init.go @@ -163,10 +163,9 @@ func (r *Replica) setStartKeyLocked(startKey roachpb.RKey) { // // This method is called in three places: // -// 1) newReplica - used when the store is initializing and during testing -// 2) tryGetOrCreateReplica - see newUnloadedReplica -// 3) splitPostApply - this call initializes a previously uninitialized Replica. -// +// 1. newReplica - used when the store is initializing and during testing +// 2. tryGetOrCreateReplica - see newUnloadedReplica +// 3. splitPostApply - this call initializes a previously uninitialized Replica. func (r *Replica) loadRaftMuLockedReplicaMuLocked(desc *roachpb.RangeDescriptor) error { ctx := r.AnnotateCtx(context.TODO()) if r.mu.state.Desc != nil && r.IsInitialized() { diff --git a/pkg/kv/kvserver/replica_placeholder.go b/pkg/kv/kvserver/replica_placeholder.go index 9602082cd0c1..03fc8abbda22 100644 --- a/pkg/kv/kvserver/replica_placeholder.go +++ b/pkg/kv/kvserver/replica_placeholder.go @@ -54,18 +54,18 @@ import ( // // The rules for placeholders are as follows: // -// - placeholders are only installed for uninitialized replicas (under raftMu). -// In particular, a snapshot that gets sent to an initialized replica installs -// no placeholder (the initialized replica plays the role of the placeholder). -// - they do not overlap any initialized replica's key bounds. (This invariant -// is maintained via Store.mu.replicasByKey). -// - a placeholder can only be removed by the operation that installed it, and -// that operation *must* eventually remove it. In practice, they are inserted -// before receiving the snapshot data, so they are fairly long-lived. They -// are removed when the receipt of the snapshot fails, the snapshot is discarded, -// or the snapshot was fully applied (in which case the placeholder is exchanged -// for a RangeDescriptor). -// - placeholders must not be copied (i.e. always pass by reference). +// - placeholders are only installed for uninitialized replicas (under raftMu). +// In particular, a snapshot that gets sent to an initialized replica installs +// no placeholder (the initialized replica plays the role of the placeholder). +// - they do not overlap any initialized replica's key bounds. (This invariant +// is maintained via Store.mu.replicasByKey). +// - a placeholder can only be removed by the operation that installed it, and +// that operation *must* eventually remove it. In practice, they are inserted +// before receiving the snapshot data, so they are fairly long-lived. They +// are removed when the receipt of the snapshot fails, the snapshot is discarded, +// or the snapshot was fully applied (in which case the placeholder is exchanged +// for a RangeDescriptor). +// - placeholders must not be copied (i.e. always pass by reference). // // In particular, when removing a placeholder we don't have to worry about // whether we're removing our own or someone else's. This is because they diff --git a/pkg/kv/kvserver/replica_proposal_buf.go b/pkg/kv/kvserver/replica_proposal_buf.go index 2bd900f31bd0..c33894263c27 100644 --- a/pkg/kv/kvserver/replica_proposal_buf.go +++ b/pkg/kv/kvserver/replica_proposal_buf.go @@ -553,12 +553,12 @@ func (b *propBuf) FlushLockedWithRaftGroup( // changing before the proposal is passed to etcd/raft. 
// // Currently, the request types which may be rejected by this function are: -// - RequestLease when the proposer is not the raft leader (with caveats). -// - TransferLease when the proposer cannot guarantee that the lease transfer -// target does not currently need a Raft snapshot to catch up on its Raft log. -// In such cases, the proposer cannot guarantee that the lease transfer target -// will not need a Raft snapshot to catch up to and apply the lease transfer. -// This requires that the proposer is the raft leader. +// - RequestLease when the proposer is not the raft leader (with caveats). +// - TransferLease when the proposer cannot guarantee that the lease transfer +// target does not currently need a Raft snapshot to catch up on its Raft log. +// In such cases, the proposer cannot guarantee that the lease transfer target +// will not need a Raft snapshot to catch up to and apply the lease transfer. +// This requires that the proposer is the raft leader. // // The function returns true if the proposal was rejected, and false if not. // If the proposal was rejected and true is returned, it will have been cleaned @@ -1047,13 +1047,13 @@ func (b *propBuf) TrackEvaluatingRequest( // ensuring that no future writes ever write below it. // // Returns false in the following cases: -// 1) target is below the propBuf's closed timestamp. This ensures that the -// side-transport (the caller) is prevented from publishing closed timestamp -// regressions. In other words, for a given LAI, the side-transport only -// publishes closed timestamps higher than what Raft published. -// 2) There are requests evaluating at timestamps equal to or below target (as -// tracked by the evalTracker). We can't close timestamps at or above these -// requests' write timestamps. +// 1. target is below the propBuf's closed timestamp. This ensures that the +// side-transport (the caller) is prevented from publishing closed timestamp +// regressions. In other words, for a given LAI, the side-transport only +// publishes closed timestamps higher than what Raft published. +// 2. There are requests evaluating at timestamps equal to or below target (as +// tracked by the evalTracker). We can't close timestamps at or above these +// requests' write timestamps. func (b *propBuf) MaybeForwardClosedLocked(ctx context.Context, target hlc.Timestamp) bool { if lb := b.evalTracker.LowerBound(ctx); !lb.IsEmpty() && lb.LessEq(target) { return false diff --git a/pkg/kv/kvserver/replica_raft.go b/pkg/kv/kvserver/replica_raft.go index b3e0b5dd32dd..344f4beccc5e 100644 --- a/pkg/kv/kvserver/replica_raft.go +++ b/pkg/kv/kvserver/replica_raft.go @@ -91,14 +91,14 @@ func makeIDKey() kvserverbase.CmdIDKey { // would violate the locking order specified for Store.mu. // // Return values: -// - a channel which receives a response or error upon application -// - a closure used to attempt to abandon the command. When called, it unbinds -// the command's context from its Raft proposal. The client is then free to -// terminate execution, although it is given no guarantee that the proposal -// won't still go on to commit and apply at some later time. -// - the proposal's ID. -// - any error obtained during the creation or proposal of the command, in -// which case the other returned values are zero. +// - a channel which receives a response or error upon application +// - a closure used to attempt to abandon the command. When called, it unbinds +// the command's context from its Raft proposal. 
The client is then free to +// terminate execution, although it is given no guarantee that the proposal +// won't still go on to commit and apply at some later time. +// - the proposal's ID. +// - any error obtained during the creation or proposal of the command, in +// which case the other returned values are zero. func (r *Replica) evalAndPropose( ctx context.Context, ba *roachpb.BatchRequest, diff --git a/pkg/kv/kvserver/replica_range_lease.go b/pkg/kv/kvserver/replica_range_lease.go index f9e325bff320..fb318fdffe46 100644 --- a/pkg/kv/kvserver/replica_range_lease.go +++ b/pkg/kv/kvserver/replica_range_lease.go @@ -589,7 +589,7 @@ func (p *pendingLeaseRequest) newResolvedHandle(pErr *roachpb.Error) *leaseReque // to serve a request at a specific timestamp (which may be a future timestamp) // under the lease, as well as a notion of the current hlc time (now). // -// Explanation +// # Explanation // // A status of ERROR indicates a failure to determine the correct lease status, // and should not occur under normal operations. The caller's only recourse is @@ -621,7 +621,7 @@ func (p *pendingLeaseRequest) newResolvedHandle(pErr *roachpb.Error) *leaseReque // Finally, for requests timestamps falling before the stasis period of a lease // that is not EXPIRED and also not PROSCRIBED, the status is VALID. // -// Implementation Note +// # Implementation Note // // On the surface, it might seem like we could easily abandon the lease stasis // concept in favor of consulting a request's uncertainty interval. We would @@ -637,15 +637,14 @@ func (p *pendingLeaseRequest) newResolvedHandle(pErr *roachpb.Error) *leaseReque // occur for two non-transactional requests operating on a single register // during a lease change: // -// * a range lease gets committed on the new lease holder (but not the old). -// * client proposes and commits a write on new lease holder (with a timestamp -// just greater than the expiration of the old lease). -// * client tries to read what it wrote, but hits a slow coordinator (which -// assigns a timestamp covered by the old lease). -// * the read is served by the old lease holder (which has not processed the -// change in lease holdership). -// * the client fails to read their own write. -// +// - a range lease gets committed on the new lease holder (but not the old). +// - client proposes and commits a write on new lease holder (with a timestamp +// just greater than the expiration of the old lease). +// - client tries to read what it wrote, but hits a slow coordinator (which +// assigns a timestamp covered by the old lease). +// - the read is served by the old lease holder (which has not processed the +// change in lease holdership). +// - the client fails to read their own write. func (r *Replica) leaseStatus( ctx context.Context, lease roachpb.Lease, @@ -1115,16 +1114,23 @@ func (r *Replica) checkRequestTimeRLocked(now hlc.ClockTimestamp, reqTS hlc.Time // The method can has four possible outcomes: // // (1) the request timestamp is too far in the future. In this case, -// a nonstructured error is returned. This shouldn't happen. +// +// a nonstructured error is returned. This shouldn't happen. +// // (2) the lease is invalid or otherwise unable to serve a request at -// the specified timestamp. In this case, an InvalidLeaseError is -// returned, which is caught in executeBatchWithConcurrencyRetries -// and used to trigger a lease acquisition/extension. +// +// the specified timestamp. 
In this case, an InvalidLeaseError is +// returned, which is caught in executeBatchWithConcurrencyRetries +// and used to trigger a lease acquisition/extension. +// // (3) the lease is valid but held by a different replica. In this case, -// a NotLeaseHolderError is returned, which is propagated back up to -// the DistSender and triggers a redirection of the request. +// +// a NotLeaseHolderError is returned, which is propagated back up to +// the DistSender and triggers a redirection of the request. +// // (4) the lease is valid, held locally, and capable of serving the -// given request. In this case, no error is returned. +// +// given request. In this case, no error is returned. // // In addition to the lease status, the method also returns whether the // lease should be considered for extension using maybeExtendLeaseAsync @@ -1222,10 +1228,11 @@ func (r *Replica) leaseGoodToGo( // served. // // TODO(spencer): for write commands, don't wait while requesting -// the range lease. If the lease acquisition fails, the write cmd -// will fail as well. If it succeeds, as is likely, then the write -// will not incur latency waiting for the command to complete. -// Reads, however, must wait. +// +// the range lease. If the lease acquisition fails, the write cmd +// will fail as well. If it succeeds, as is likely, then the write +// will not incur latency waiting for the command to complete. +// Reads, however, must wait. func (r *Replica) redirectOnOrAcquireLease( ctx context.Context, ) (kvserverpb.LeaseStatus, *roachpb.Error) { diff --git a/pkg/kv/kvserver/replica_send.go b/pkg/kv/kvserver/replica_send.go index 4fb51a8ccb66..bc886af8dee4 100644 --- a/pkg/kv/kvserver/replica_send.go +++ b/pkg/kv/kvserver/replica_send.go @@ -51,55 +51,63 @@ var optimisticEvalLimitedScans = settings.RegisterBoolSetting( // is presented below, with a focus on where requests may spend // most of their time (once they arrive at the Node.Batch endpoint). 
// -// DistSender (tenant) -// │ -// ┆ (RPC) -// │ -// ▼ -// Node.Batch (host cluster) -// │ -// ▼ -// Admission control -// │ -// ▼ -// Replica.Send -// │ -// Circuit breaker -// │ -// ▼ -// Replica.maybeBackpressureBatch (if Range too large) -// │ -// ▼ -// Replica.maybeRateLimitBatch (tenant rate limits) -// │ -// ▼ -// Replica.maybeCommitWaitBeforeCommitTrigger (if committing with commit-trigger) -// │ +// DistSender (tenant) +// │ +// ┆ (RPC) +// │ +// ▼ +// Node.Batch (host cluster) +// │ +// ▼ +// Admission control +// │ +// ▼ +// Replica.Send +// │ +// Circuit breaker +// │ +// ▼ +// Replica.maybeBackpressureBatch (if Range too large) +// │ +// ▼ +// Replica.maybeRateLimitBatch (tenant rate limits) +// │ +// ▼ +// Replica.maybeCommitWaitBeforeCommitTrigger (if committing with commit-trigger) +// │ +// // read-write ◄─────────────────────────┴────────────────────────► read-only -// │ │ -// │ │ -// ├─────────────► executeBatchWithConcurrencyRetries ◄────────────┤ -// │ (handles leases and txn conflicts) │ -// │ │ -// ▼ │ +// +// │ │ +// │ │ +// ├─────────────► executeBatchWithConcurrencyRetries ◄────────────┤ +// │ (handles leases and txn conflicts) │ +// │ │ +// ▼ │ +// // executeWriteBatch │ -// │ │ -// ▼ ▼ +// +// │ │ +// ▼ ▼ +// // evalAndPropose (turns the BatchRequest executeReadOnlyBatch -// │ into pebble WriteBatch) -// │ -// ├──────────────────► (writes that can use async consensus do not -// │ wait for replication and are done here) -// │ -// ├──────────────────► maybeAcquireProposalQuota -// │ (applies backpressure in case of -// │ lagging Raft followers) -// │ -// │ -// ▼ +// +// │ into pebble WriteBatch) +// │ +// ├──────────────────► (writes that can use async consensus do not +// │ wait for replication and are done here) +// │ +// ├──────────────────► maybeAcquireProposalQuota +// │ (applies backpressure in case of +// │ lagging Raft followers) +// │ +// │ +// ▼ +// // handleRaftReady (drives the Raft loop, first appending to the log -// to commit the command, then signaling proposer and -// applying the command) +// +// to commit the command, then signaling proposer and +// applying the command) func (r *Replica) Send( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { @@ -347,16 +355,16 @@ func (r *Replica) maybeAddRangeInfoToResponse( // caller. However, it does not need to. Instead, it can assume responsibility // for releasing the concurrency guard it was provided by returning nil. This is // useful is cases where the function: -// 1. eagerly released the concurrency guard after it determined that isolation -// from conflicting requests was no longer needed. -// 2. is continuing to execute asynchronously and needs to maintain isolation -// from conflicting requests throughout the lifetime of its asynchronous -// processing. The most prominent example of asynchronous processing is -// with requests that have the "async consensus" flag set. A more subtle -// case is with requests that are acknowledged by the Raft machinery after -// their Raft entry has been committed but before it has been applied to -// the replicated state machine. In all of these cases, responsibility -// for releasing the concurrency guard is handed to Raft. +// 1. eagerly released the concurrency guard after it determined that isolation +// from conflicting requests was no longer needed. +// 2. is continuing to execute asynchronously and needs to maintain isolation +// from conflicting requests throughout the lifetime of its asynchronous +// processing. 
The most prominent example of asynchronous processing is +// with requests that have the "async consensus" flag set. A more subtle +// case is with requests that are acknowledged by the Raft machinery after +// their Raft entry has been committed but before it has been applied to +// the replicated state machine. In all of these cases, responsibility +// for releasing the concurrency guard is handed to Raft. // // However, this option is not permitted if the function returns a "server-side // concurrency retry error" (see isConcurrencyRetryError for more details). If diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index c7fd60baec2b..368603a7dc49 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -839,15 +839,15 @@ func TestLeaseReplicaNotInDesc(t *testing.T) { } // TestReplicaRangeMismatchRedirect tests two behaviors that should occur. -// - Following a Range split, the client may send BatchRequests based on stale -// cache data targeting the wrong range. Internally this triggers a -// RangeKeyMismatchError, but in the cases where the RHS of the range is still -// present on the local store, we opportunistically retry server-side by -// re-routing the request to the right range. No error is bubbled up to the -// client. -// - This test also ensures that after a successful server-side retry attempt we -// bubble up the most up-to-date RangeInfos for the client to update its range -// cache. +// - Following a Range split, the client may send BatchRequests based on stale +// cache data targeting the wrong range. Internally this triggers a +// RangeKeyMismatchError, but in the cases where the RHS of the range is still +// present on the local store, we opportunistically retry server-side by +// re-routing the request to the right range. No error is bubbled up to the +// client. +// - This test also ensures that after a successful server-side retry attempt we +// bubble up the most up-to-date RangeInfos for the client to update its range +// cache. func TestReplicaRangeMismatchRedirect(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -8699,13 +8699,12 @@ func TestRefreshFromBelowGCThreshold(t *testing.T) { // The test contains a subtest for each of the combinations of the following // boolean options: // -// - followerRead: configures whether the read should be served from the +// - followerRead: configures whether the read should be served from the // leaseholder replica or from a follower replica. // -// - thresholdFirst: configures whether the GC operation should be split into +// - thresholdFirst: configures whether the GC operation should be split into // two requests, with the first bumping the GC threshold and the second // GCing the expired version. This is how the real MVCC GC queue works. -// func TestGCThresholdRacesWithRead(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -13315,14 +13314,14 @@ func TestSplitSnapshotWarningStr(t *testing.T) { // // The test does the following things: // -// * Propose cmd at an initial MaxLeaseIndex. -// * Refresh that cmd immediately. -// * Fail the initial command with an injected error which will lead to a -// reproposal at a higher MaxLeaseIndex. -// * Simultaneously update the lease sequence number on the replica so all -// future commands will fail with NotLeaseHolderError. -// * Enable unconditional refreshes of commands after a raft ready so that -// higher MaxLeaseIndex commands are refreshed. 
+// - Propose cmd at an initial MaxLeaseIndex. +// - Refresh that cmd immediately. +// - Fail the initial command with an injected error which will lead to a +// reproposal at a higher MaxLeaseIndex. +// - Simultaneously update the lease sequence number on the replica so all +// future commands will fail with NotLeaseHolderError. +// - Enable unconditional refreshes of commands after a raft ready so that +// higher MaxLeaseIndex commands are refreshed. // // This order of events ensures that there will be a committed command which // experiences the lease mismatch error but does not carry the highest diff --git a/pkg/kv/kvserver/replica_tscache.go b/pkg/kv/kvserver/replica_tscache.go index 1f18b87a2996..be768c9f8e95 100644 --- a/pkg/kv/kvserver/replica_tscache.go +++ b/pkg/kv/kvserver/replica_tscache.go @@ -408,48 +408,47 @@ func (r *Replica) applyTimestampCache( // // This is detailed in the transaction record state machine below: // -// +----------------------------------------------------+ -// | vars | -// |----------------------------------------------------| -// | v1 = tsCache[push_marker(txn.id)] = timestamp | -// | v2 = tsCache[tombstone_marker(txn.id)] = timestamp | -// +----------------------------------------------------+ -// | operations | -// |----------------------------------------------------| -// | v -> t = forward v by timestamp t | -// +----------------------------------------------------+ -// -// HeartbeatTxn -// PushTxn(TIMESTAMP) then: update record -// then: v1 -> push.ts v2 -> txn.ts -// +------+ HeartbeatTxn +------+ -// PushTxn(ABORT) | | if: v2 < txn.orig | | PushTxn(TIMESTAMP) -// then: v2 -> txn.ts | v then: txn.ts -> v1 | v then: update record -// +-----------------+ v2 -> txn.ts +--------------------+ -// +----| | else: fail | |----+ -// | | |------------------------->| | | -// | | no txn record | | txn record written | | -// +--->| | EndTxn(STAGING) | [pending] |<---+ -// | |__ if: v2 < txn.orig | | -// +-----------------+ \__ then: txn.ts -> v1 +--------------------+ -// | ^ \__ else: fail _/ | ^ -// | | \__ _/ | | -// EndTxn(!STAGING) | | \__ _/ | EndTxn(STAGING) -// if: v2 < txn.orig | Eager GC | \____ _/______ | | -// then: v2 -> txn.ts | or | _/ \ | | HeartbeatTxn -// else: fail | GC queue | /----------------/ | | | if: epoch update -// v | v EndTxn(!STAGING) v v | -// +--------------------+ or PushTxn(ABORT) +--------------------+ -// | | then: v2 -> txn.ts | | -// +--->| |<-----------------------| |----+ -// | | txn record written | | txn record written | | -// | | [finalized] | | [staging] | | -// +----| | | |<---+ -// PushTxn(*) +--------------------+ +--------------------+ -// then: no-op ^ PushTxn(*) + RecoverTxn | EndTxn(STAGING) -// | then: v2 -> txn.ts | or HeartbeatTxn -// +------------------------------+ then: update record +// +----------------------------------------------------+ +// | vars | +// |----------------------------------------------------| +// | v1 = tsCache[push_marker(txn.id)] = timestamp | +// | v2 = tsCache[tombstone_marker(txn.id)] = timestamp | +// +----------------------------------------------------+ +// | operations | +// |----------------------------------------------------| +// | v -> t = forward v by timestamp t | +// +----------------------------------------------------+ // +// HeartbeatTxn +// PushTxn(TIMESTAMP) then: update record +// then: v1 -> push.ts v2 -> txn.ts +// +------+ HeartbeatTxn +------+ +// PushTxn(ABORT) | | if: v2 < txn.orig | | PushTxn(TIMESTAMP) +// then: v2 -> txn.ts | v then: txn.ts -> v1 | v 
then: update record +// +-----------------+ v2 -> txn.ts +--------------------+ +// +----| | else: fail | |----+ +// | | |------------------------->| | | +// | | no txn record | | txn record written | | +// +--->| | EndTxn(STAGING) | [pending] |<---+ +// | |__ if: v2 < txn.orig | | +// +-----------------+ \__ then: txn.ts -> v1 +--------------------+ +// | ^ \__ else: fail _/ | ^ +// | | \__ _/ | | +// EndTxn(!STAGING) | | \__ _/ | EndTxn(STAGING) +// if: v2 < txn.orig | Eager GC | \____ _/______ | | +// then: v2 -> txn.ts | or | _/ \ | | HeartbeatTxn +// else: fail | GC queue | /----------------/ | | | if: epoch update +// v | v EndTxn(!STAGING) v v | +// +--------------------+ or PushTxn(ABORT) +--------------------+ +// | | then: v2 -> txn.ts | | +// +--->| |<-----------------------| |----+ +// | | txn record written | | txn record written | | +// | | [finalized] | | [staging] | | +// +----| | | |<---+ +// PushTxn(*) +--------------------+ +--------------------+ +// then: no-op ^ PushTxn(*) + RecoverTxn | EndTxn(STAGING) +// | then: v2 -> txn.ts | or HeartbeatTxn +// +------------------------------+ then: update record // // In the diagram, CanCreateTxnRecord is consulted in all three of the // state transitions that move away from the "no txn record" state. @@ -508,7 +507,6 @@ func (r *Replica) applyTimestampCache( // with only two states that the transaction record could be in, written or not // written. At that point, it begins to closely resemble any other write in the // system. -// func (r *Replica) CanCreateTxnRecord( ctx context.Context, txnID uuid.UUID, txnKey []byte, txnMinTS hlc.Timestamp, ) (ok bool, minCommitTS hlc.Timestamp, reason roachpb.TransactionAbortedReason) { diff --git a/pkg/kv/kvserver/replica_write.go b/pkg/kv/kvserver/replica_write.go index 87a58723720c..3924b896a170 100644 --- a/pkg/kv/kvserver/replica_write.go +++ b/pkg/kv/kvserver/replica_write.go @@ -56,18 +56,18 @@ var migrateApplicationTimeout = settings.RegisterDurationSetting( // // Concretely, // -// - The timestamp cache is checked to determine if the command's affected keys -// were accessed with a timestamp exceeding that of the command; if so, the -// command's timestamp is incremented accordingly. -// - A RaftCommand is constructed. If proposer-evaluated KV is active, -// the request is evaluated and the Result is placed in the -// RaftCommand. If not, the request itself is added to the command. -// - The proposal is inserted into the Replica's in-flight proposals map, -// a lease index is assigned to it, and it is submitted to Raft, returning -// a channel. -// - The result of the Raft proposal is read from the channel and the command -// registered with the timestamp cache, its latches are released, and -// its result (which could be an error) is returned to the client. +// - The timestamp cache is checked to determine if the command's affected keys +// were accessed with a timestamp exceeding that of the command; if so, the +// command's timestamp is incremented accordingly. +// - A RaftCommand is constructed. If proposer-evaluated KV is active, +// the request is evaluated and the Result is placed in the +// RaftCommand. If not, the request itself is added to the command. +// - The proposal is inserted into the Replica's in-flight proposals map, +// a lease index is assigned to it, and it is submitted to Raft, returning +// a channel. 
+// - The result of the Raft proposal is read from the channel and the command +// registered with the timestamp cache, its latches are released, and +// its result (which could be an error) is returned to the client. // // Returns either a response or an error, along with the provided concurrency // guard if it is passing ownership back to the caller of the function. @@ -739,7 +739,8 @@ func (r *Replica) newBatchedEngine( // (2) the transaction's commit timestamp has been forwarded // (3) the transaction exceeded its deadline // (4) the transaction is not in its first epoch and the EndTxn request does -// not require one phase commit. +// +// not require one phase commit. func isOnePhaseCommit(ba *roachpb.BatchRequest) bool { if ba.Txn == nil { return false diff --git a/pkg/kv/kvserver/reports/constraint_stats_report_test.go b/pkg/kv/kvserver/reports/constraint_stats_report_test.go index 1fde69e95c18..013d73c8a966 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report_test.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report_test.go @@ -1191,7 +1191,8 @@ func makeTableDesc(t table, tableID int, dbID int) (descpb.TableDescriptor, erro // and possibly nil). // // parent: Can be nil if the parent table doesn't have a zone of its own. In that -// case, if any subzones are created, a placeholder zone will also be created and returned. +// +// case, if any subzones are created, a placeholder zone will also be created and returned. func addIndexSubzones(idx index, parent *zonepb.ZoneConfig, idxID int) *zonepb.ZoneConfig { res := parent diff --git a/pkg/kv/kvserver/spanlatch/doc.go b/pkg/kv/kvserver/spanlatch/doc.go index 53795ddb4a04..d602be5920e2 100644 --- a/pkg/kv/kvserver/spanlatch/doc.go +++ b/pkg/kv/kvserver/spanlatch/doc.go @@ -19,20 +19,19 @@ enable more concurrency between requests. The structure can trace its lineage back to a simple sync.Mutex. From there, the structure evolved through the following progression: - * The structure began by enforcing strict mutual exclusion for access to any - keys. Conceptually, it was a sync.Mutex. - * Concurrent read-only access to keys and key ranges was permitted. Read and - writes were serialized with each other, writes were serialized with each other, - but no ordering was enforced between reads. Conceptually, the structure became - a sync.RWMutex. - * The structure became key range-aware and concurrent access to non-overlapping - key ranges was permitted. Conceptually, the structure became an interval - tree of sync.RWMutexes. - * The structure became timestamp-aware and concurrent access of non-causal - read and write pairs was permitted. The effect of this was that reads no - longer waited for writes at higher timestamps and writes no longer waited - for reads at lower timestamps. Conceptually, the structure became an interval - tree of timestamp-aware sync.RWMutexes. - + - The structure began by enforcing strict mutual exclusion for access to any + keys. Conceptually, it was a sync.Mutex. + - Concurrent read-only access to keys and key ranges was permitted. Read and + writes were serialized with each other, writes were serialized with each other, + but no ordering was enforced between reads. Conceptually, the structure became + a sync.RWMutex. + - The structure became key range-aware and concurrent access to non-overlapping + key ranges was permitted. Conceptually, the structure became an interval + tree of sync.RWMutexes. 
+ - The structure became timestamp-aware and concurrent access of non-causal + read and write pairs was permitted. The effect of this was that reads no + longer waited for writes at higher timestamps and writes no longer waited + for reads at lower timestamps. Conceptually, the structure became an interval + tree of timestamp-aware sync.RWMutexes. */ package spanlatch diff --git a/pkg/kv/kvserver/spanlatch/latch_interval_btree.go b/pkg/kv/kvserver/spanlatch/latch_interval_btree.go index c8f081ab7c10..2cfe22ad69a0 100644 --- a/pkg/kv/kvserver/spanlatch/latch_interval_btree.go +++ b/pkg/kv/kvserver/spanlatch/latch_interval_btree.go @@ -32,17 +32,20 @@ const ( // cmp returns a value indicating the sort order relationship between // a and b. The comparison is performed lexicographically on -// (a.Key(), a.EndKey(), a.ID()) +// +// (a.Key(), a.EndKey(), a.ID()) +// // and -// (b.Key(), b.EndKey(), b.ID()) +// +// (b.Key(), b.EndKey(), b.ID()) +// // tuples. // // Given c = cmp(a, b): // -// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) -// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) -// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) -// +// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) +// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) +// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) func cmp(a, b *latch) int { c := bytes.Compare(a.Key(), b.Key()) if c != 0 { @@ -325,21 +328,21 @@ func (n *node) find(item *latch) (index int, found bool) { // // Before: // -// +-----------+ -// | x y z | -// +--/-/-\-\--+ +// +-----------+ +// | x y z | +// +--/-/-\-\--+ // // After: // -// +-----------+ -// | y | -// +----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// +----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | z | // +-----------+ +-----------+ -// func (n *node) split(i int) (*latch, *node) { out := n.items[i] var next *node @@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *latch { // is to minimize the number of key comparisons performed in total. The // algorithm operates based on the following two invariants maintained by // augmented interval btree: -// 1. all items are sorted in the btree based on their start key. -// 2. all btree nodes maintain the upper bound end key of all items -// in their subtree. +// 1. all items are sorted in the btree based on their start key. +// 2. all btree nodes maintain the upper bound end key of all items +// in their subtree. // // The scan algorithm starts in "unconstrained minimum" and "unconstrained // maximum" states. To enter a "constrained minimum" state, the scan must reach @@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *latch { // // The scan algorithm works like a standard btree forward scan with the // following augmentations: -// 1. before tranversing the tree, the scan performs a binary search on the -// root node's items to determine a "soft" lower-bound constraint position -// and a "hard" upper-bound constraint position in the root's children. -// 2. when tranversing into a child node in the lower or upper bound constraint -// position, the constraint is refined by searching the child's items. -// 3. the initial traversal down the tree follows the left-most children -// whose upper bound end keys are equal to or greater than the start key -// of the search range. The children followed will be equal to or less -// than the soft lower bound constraint. -// 4. 
once the initial tranversal completes and the scan is in the left-most -// btree node whose upper bound overlaps the search range, key comparisons -// must be performed with each item in the tree. This is necessary because -// any of these items may have end keys that cause them to overlap with the -// search range. -// 5. once the scan reaches the lower bound constraint position (the first item -// with a start key equal to or greater than the search range's start key), -// it can begin scaning without performing key comparisons. This is allowed -// because all items from this point forward will have end keys that are -// greater than the search range's start key. -// 6. once the scan reaches the upper bound constraint position, it terminates. -// It does so because the item at this position is the first item with a -// start key larger than the search range's end key. +// 1. before tranversing the tree, the scan performs a binary search on the +// root node's items to determine a "soft" lower-bound constraint position +// and a "hard" upper-bound constraint position in the root's children. +// 2. when tranversing into a child node in the lower or upper bound constraint +// position, the constraint is refined by searching the child's items. +// 3. the initial traversal down the tree follows the left-most children +// whose upper bound end keys are equal to or greater than the start key +// of the search range. The children followed will be equal to or less +// than the soft lower bound constraint. +// 4. once the initial tranversal completes and the scan is in the left-most +// btree node whose upper bound overlaps the search range, key comparisons +// must be performed with each item in the tree. This is necessary because +// any of these items may have end keys that cause them to overlap with the +// search range. +// 5. once the scan reaches the lower bound constraint position (the first item +// with a start key equal to or greater than the search range's start key), +// it can begin scaning without performing key comparisons. This is allowed +// because all items from this point forward will have end keys that are +// greater than the search range's start key. +// 6. once the scan reaches the upper bound constraint position, it terminates. +// It does so because the item at this position is the first item with a +// start key larger than the search range's end key. type overlapScan struct { // The "soft" lower-bound constraint. constrMinN *node diff --git a/pkg/kv/kvserver/spanlatch/signal.go b/pkg/kv/kvserver/spanlatch/signal.go index 1a7fdceb1968..f3b99480f78d 100644 --- a/pkg/kv/kvserver/spanlatch/signal.go +++ b/pkg/kv/kvserver/spanlatch/signal.go @@ -28,13 +28,12 @@ const ( // // The type has three benefits over using a channel directly and // closing the channel when the operation completes: -// 1. signaled() uses atomics to provide a fast-path for checking -// whether the operation has completed. It is ~75x faster than -// using a channel for this purpose. -// 2. the receiver's channel is lazily initialized when signalChan() -// is called, avoiding the allocation when one is not needed. -// 3. because of 2, the type's zero value can be used directly. -// +// 1. signaled() uses atomics to provide a fast-path for checking +// whether the operation has completed. It is ~75x faster than +// using a channel for this purpose. +// 2. the receiver's channel is lazily initialized when signalChan() +// is called, avoiding the allocation when one is not needed. +// 3. 
because of 2, the type's zero value can be used directly. type signal struct { a int32 c unsafe.Pointer // chan struct{}, lazily initialized diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index 6c88f25e7e79..c524ed73ba31 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -446,7 +446,7 @@ INVARIANT: the set of all Ranges (as determined by, e.g. a transactionally consistent scan of the meta index ranges) always exactly covers the addressable keyspace roachpb.KeyMin (inclusive) to roachpb.KeyMax (exclusive). -Ranges +# Ranges Each Replica is part of a Range, i.e. corresponds to what other systems would call a shard. A Range is a consensus group backed by Raft, i.e. each Replica is @@ -463,7 +463,7 @@ these interact heavily with the Range as a consensus group (of which each Replica is a member). All of these intricacies are described at a high level in this comment. -RangeDescriptor +# RangeDescriptor A roachpb.RangeDescriptor is the configuration of a Range. It is an MVCC-backed key-value pair (where the key is derived from the StartKey via @@ -481,8 +481,8 @@ these operations at some point will - update the RangeDescriptor (for example, to reflect a split, or a change to the Replicas comprising the members of the Range) -- update the meta ranges (which form a search index used for request routing, see - kv.RangeLookup and updateRangeAddressing for details) + - update the meta ranges (which form a search index used for request routing, see + kv.RangeLookup and updateRangeAddressing for details) - commit with a roachpb.InternalCommitTrigger. @@ -521,7 +521,7 @@ Replica for any given key, and ensure that no two Replicas on a Store operate on shared keyspace (as seen by the storage.Engine). Refer to the Replica Lifecycle diagram below for details on how this invariant is upheld. -Replica Lifecycle +# Replica Lifecycle A Replica should be thought of primarily as a State Machine applying commands from a replicated log (the log being replicated across the members of the @@ -581,41 +581,41 @@ request a snapshot. See maybeDelaySplitToAvoidSnapshot. The diagram is a lot to take in. The various transitions are discussed in prose below, and the source .dot file is in store_doc_replica_lifecycle.dot. 
- +---------------------+ - +------------------ | Absent | ---------------------------------------------------------------------------------------------------+ - | +---------------------+ | - | | Subsume Crash applySnapshot | - | | Store.Start +---------------+ +---------+ +---------------+ | - | v v | v | v | | - | +-----------------------------------------------------------------------------------------------------------------------+ | - +---------+------------------ | | | - | | | Initialized | | - | | | | | - | +----+------------------ | | -+----+ - | | | +-----------------------------------------------------------------------------------------------------------------------+ | | - | | | | ^ ^ | | | | | - | | | Raft msg | Crash | applySnapshot | post-split | | | | | - | | | v | | | | | | | - | | | +---------------------------------------------------------+ pre-split | | | | | - | | +-----------------> | | <---------------------+--------------+--------------------+----+ | - | | | | | | | | - | | | Uninitialized | Raft msg | | | | - | | | | -----------------+ | | | | - | | | | | | | | | - | | | | <----------------+ | | | | - | | +---------------------------------------------------------+ | | apply removal | | - | | | | | | | | - | | | ReplicaTooOldError | higher ReplicaID | Replica GC | | | - | | v v v | | | - | | Merged (snapshot) +---------------------------------------------------------------------------------------------+ | | | - | +----------------------> | | <+ | | - | | | | | - | apply Merge | | ReplicaTooOld | | - +---------------------------> | Removed | <---------------------+ | - | | | - | | higher ReplicaID | - | | <-------------------------------+ - +---------------------------------------------------------------------------------------------+ + +---------------------+ + +------------------ | Absent | ---------------------------------------------------------------------------------------------------+ + | +---------------------+ | + | | Subsume Crash applySnapshot | + | | Store.Start +---------------+ +---------+ +---------------+ | + | v v | v | v | | + | +-----------------------------------------------------------------------------------------------------------------------+ | + +---------+------------------ | | | + | | | Initialized | | + | | | | | + | +----+------------------ | | -+----+ + | | | +-----------------------------------------------------------------------------------------------------------------------+ | | + | | | | ^ ^ | | | | | + | | | Raft msg | Crash | applySnapshot | post-split | | | | | + | | | v | | | | | | | + | | | +---------------------------------------------------------+ pre-split | | | | | + | | +-----------------> | | <---------------------+--------------+--------------------+----+ | + | | | | | | | | + | | | Uninitialized | Raft msg | | | | + | | | | -----------------+ | | | | + | | | | | | | | | + | | | | <----------------+ | | | | + | | +---------------------------------------------------------+ | | apply removal | | + | | | | | | | | + | | | ReplicaTooOldError | higher ReplicaID | Replica GC | | | + | | v v v | | | + | | Merged (snapshot) +---------------------------------------------------------------------------------------------+ | | | + | +----------------------> | | <+ | | + | | | | | + | apply Merge | | ReplicaTooOld | | + +---------------------------> | Removed | <---------------------+ | + | | | + | | higher ReplicaID | + | | <-------------------------------+ + 
+---------------------------------------------------------------------------------------------+ When a Store starts, it iterates through all RangeDescriptors it can find on its Engine. Finding a RangeDescriptor by definition implies that the Replica is diff --git a/pkg/kv/kvserver/store_init.go b/pkg/kv/kvserver/store_init.go index 854daae6c465..8bf9f3e9519e 100644 --- a/pkg/kv/kvserver/store_init.go +++ b/pkg/kv/kvserver/store_init.go @@ -78,11 +78,15 @@ func InitEngine(ctx context.Context, eng storage.Engine, ident roachpb.StoreIden // Args: // eng: the engine to which data is to be written. // initialValues: an optional list of k/v to be written as well after each -// value's checksum is initialized. +// +// value's checksum is initialized. +// // bootstrapVersion: the version at which the cluster is bootstrapped. // numStores: the number of stores this node will have. // splits: an optional list of split points. Range addressing will be created -// for all the splits. The list needs to be sorted. +// +// for all the splits. The list needs to be sorted. +// // nowNanos: the timestamp at which to write the initial engine data. func WriteInitialClusterData( ctx context.Context, diff --git a/pkg/kv/kvserver/store_raft.go b/pkg/kv/kvserver/store_raft.go index eefd61f34f95..10796a5eca12 100644 --- a/pkg/kv/kvserver/store_raft.go +++ b/pkg/kv/kvserver/store_raft.go @@ -676,9 +676,9 @@ func (s *Store) processTick(_ context.Context, rangeID roachpb.RangeID) bool { // See the comment in shouldFollowerQuiesceOnNotify for details on how these two // functions combine to provide the guarantee that: // -// If a quorum of replica in a Raft group is alive and at least -// one of these replicas is up-to-date, the Raft group will catch -// up any of the live, lagging replicas. +// If a quorum of replica in a Raft group is alive and at least +// one of these replicas is up-to-date, the Raft group will catch +// up any of the live, lagging replicas. // // Note that this mechanism can race with concurrent invocations of processTick, // which may have a copy of the previous livenessMap where the now-live node is diff --git a/pkg/kv/kvserver/store_rebalancer_test.go b/pkg/kv/kvserver/store_rebalancer_test.go index faa8b76e25b5..a19e11c59e01 100644 --- a/pkg/kv/kvserver/store_rebalancer_test.go +++ b/pkg/kv/kvserver/store_rebalancer_test.go @@ -1491,10 +1491,10 @@ func TestNoLeaseTransferToBehindReplicas(t *testing.T) { } // TestStoreRebalancerReadAmpCheck checks that: -// - Under (1) disabled and (2) log that rebalancing decisions are unaffected -// by high read amplification. -// - Under (3) rebalanceOnly and (4) allocate that rebalance decisions exclude -// stores with high readamplification as candidate targets. +// - Under (1) disabled and (2) log that rebalancing decisions are unaffected +// by high read amplification. +// - Under (3) rebalanceOnly and (4) allocate that rebalance decisions exclude +// stores with high readamplification as candidate targets. func TestStoreRebalancerReadAmpCheck(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvserver/store_send.go b/pkg/kv/kvserver/store_send.go index cf2048740f16..13e028e28d61 100644 --- a/pkg/kv/kvserver/store_send.go +++ b/pkg/kv/kvserver/store_send.go @@ -400,16 +400,16 @@ func (s *Store) maybeThrottleBatch( // perform its negotiation phase and execution phase in a single RPC. // // The server-side negotiation fast-path provides two benefits: -// 1. 
it avoids two network hops in the common-case where a bounded staleness -// read is targeting a single range. This in an important performance -// optimization for single-row point lookups. -// 2. it provides stronger guarantees around minimizing staleness during bounded -// staleness reads. Bounded staleness reads that hit the server-side -// fast-path use their target replica's most up-to-date resolved timestamp, -// so they are as fresh as possible. Bounded staleness reads that miss the -// fast-path and perform explicit negotiation (see below) consult a cache, so -// they may use an out-of-date, suboptimal resolved timestamp, as long as it -// is fresh enough to satisfy the staleness bound of the request. +// 1. it avoids two network hops in the common-case where a bounded staleness +// read is targeting a single range. This in an important performance +// optimization for single-row point lookups. +// 2. it provides stronger guarantees around minimizing staleness during bounded +// staleness reads. Bounded staleness reads that hit the server-side +// fast-path use their target replica's most up-to-date resolved timestamp, +// so they are as fresh as possible. Bounded staleness reads that miss the +// fast-path and perform explicit negotiation (see below) consult a cache, so +// they may use an out-of-date, suboptimal resolved timestamp, as long as it +// is fresh enough to satisfy the staleness bound of the request. // // The method should be called for requests that have their MinTimestampBound // field set, which indicates that the request wants a dynamic timestamp equal diff --git a/pkg/kv/kvserver/store_snapshot.go b/pkg/kv/kvserver/store_snapshot.go index 7ab6ab1bf307..6e7e48ec9c36 100644 --- a/pkg/kv/kvserver/store_snapshot.go +++ b/pkg/kv/kvserver/store_snapshot.go @@ -1194,18 +1194,18 @@ var snapshotSenderBatchSize = settings.RegisterByteSizeSetting( // that it will itself fail during sending, while the next snapshot wastes // enough time waiting for us that it will itself fail, ad infinitum: // -// t | snap1 snap2 snap3 snap4 snap5 ... -// ----+------------------------------------ -// 0 | send -// 15 | queue queue -// 30 | queue -// 45 | ok send -// 60 | queue -// 75 | fail fail send -// 90 | fail send -// 105 | -// 120 | fail -// 135 | +// t | snap1 snap2 snap3 snap4 snap5 ... +// ----+------------------------------------ +// 0 | send +// 15 | queue queue +// 30 | queue +// 45 | ok send +// 60 | queue +// 75 | fail fail send +// 90 | fail send +// 105 | +// 120 | fail +// 135 | // // If we limit the amount of time we are willing to wait for a reservation to // something that is small enough to, on success, give us enough time to @@ -1213,18 +1213,18 @@ var snapshotSenderBatchSize = settings.RegisterByteSizeSetting( // timeout, 45s needed to stream the data, we can wait at most 15s for a // reservation and still avoid starvation: // -// t | snap1 snap2 snap3 snap4 snap5 ... -// ----+------------------------------------ -// 0 | send -// 15 | queue queue -// 30 | fail fail send -// 45 | -// 60 | ok queue -// 75 | ok send -// 90 | -// 105 | -// 120 | ok -// 135 | +// t | snap1 snap2 snap3 snap4 snap5 ... +// ----+------------------------------------ +// 0 | send +// 15 | queue queue +// 30 | fail fail send +// 45 | +// 60 | ok queue +// 75 | ok send +// 90 | +// 105 | +// 120 | ok +// 135 | // // In practice, the snapshot reservation logic (reserveReceiveSnapshot) doesn't know // how long sending the snapshot will actually take. 
But it knows the timeout it @@ -1239,7 +1239,7 @@ var snapshotSenderBatchSize = settings.RegisterByteSizeSetting( // as the average streaming time is less than the guaranteed processing time for // any snapshot that succeeds in acquiring a reservation: // -// guaranteed_processing_time = (1 - reservation_queue_timeout_fraction) x timeout +// guaranteed_processing_time = (1 - reservation_queue_timeout_fraction) x timeout // // The timeout for the snapshot and replicate queues bottoms out at 60s (by // default, see kv.queue.process.guaranteed_time_budget). Given a default diff --git a/pkg/kv/kvserver/tenantrate/limiter_test.go b/pkg/kv/kvserver/tenantrate/limiter_test.go index 3d742c05a28e..e88e65235fdc 100644 --- a/pkg/kv/kvserver/tenantrate/limiter_test.go +++ b/pkg/kv/kvserver/tenantrate/limiter_test.go @@ -132,14 +132,13 @@ var t0 = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) // The argument is a yaml serialization of LimitConfigs. It returns the time as // of initialization (00:00:00.000). For example: // -// init -// rate: 1 -// burst: 2 -// read: { perbatch: 1, perrequest: 1, perbyte: 1 } -// write: { perbatch: 1, perrequest: 1, perbyte: 1 } -// ---- -// 00:00:00.000 -// +// init +// rate: 1 +// burst: 2 +// read: { perbatch: 1, perrequest: 1, perbyte: 1 } +// write: { perbatch: 1, perrequest: 1, perbyte: 1 } +// ---- +// 00:00:00.000 func (ts *testState) init(t *testing.T, d *datadriven.TestData) string { if ts.initialized { d.Fatalf(t, "already ran init") @@ -174,11 +173,10 @@ func (ts *testState) updateSettings(t *testing.T, d *datadriven.TestData) string // advance advances the clock by the provided duration and returns the new // current time. // -// advance -// 2s -// ---- -// 00:00:02.000 -// +// advance +// 2s +// ---- +// 00:00:02.000 func (ts *testState) advance(t *testing.T, d *datadriven.TestData) string { dur, err := time.ParseDuration(d.Input) if err != nil { @@ -201,12 +199,11 @@ func (ts *testState) advance(t *testing.T, d *datadriven.TestData) string { // The below example would launch two requests with ids "a" and "b" // corresponding to tenants 2 and 3 respectively. // -// launch -// - { id: a, tenant: 2, writebytes: 3} -// - { id: b, tenant: 3} -// ---- -// [a@2, b@3] -// +// launch +// - { id: a, tenant: 2, writebytes: 3} +// - { id: b, tenant: 3} +// ---- +// [a@2, b@3] func (ts *testState) launch(t *testing.T, d *datadriven.TestData) string { var cmds []struct { ID string @@ -251,11 +248,10 @@ func (ts *testState) launch(t *testing.T, d *datadriven.TestData) string { // // For example: // -// await -// [a] -// ---- -// [b@3] -// +// await +// [a] +// ---- +// [b@3] func (ts *testState) await(t *testing.T, d *datadriven.TestData) string { ids := parseStrings(t, d) const awaitTimeout = 1000 * time.Second @@ -285,11 +281,10 @@ func (ts *testState) await(t *testing.T, d *datadriven.TestData) string { // from the set of outstanding requests. The set of remaining requests will be // returned. See launch for details on the serialization of the output. 
// -// cancel -// [b] -// ---- -// [a@2] -// +// cancel +// [b] +// ---- +// [a@2] func (ts *testState) cancel(t *testing.T, d *datadriven.TestData) string { ids := parseStrings(t, d) for _, id := range ids { @@ -314,11 +309,10 @@ func (ts *testState) cancel(t *testing.T, d *datadriven.TestData) string { // // For example: // -// record_read -// - { tenant: 2, readrequests: 1, readbytes: 32 } -// ---- -// [a@2] -// +// record_read +// - { tenant: 2, readrequests: 1, readbytes: 32 } +// ---- +// [a@2] func (ts *testState) recordRead(t *testing.T, d *datadriven.TestData) string { var reads []struct { Tenant uint64 @@ -347,38 +341,37 @@ func (ts *testState) recordRead(t *testing.T, d *datadriven.TestData) string { // // For example: // -// metrics -// ---- -// kv_tenant_rate_limit_current_blocked 0 -// kv_tenant_rate_limit_current_blocked{tenant_id="2"} 0 -// kv_tenant_rate_limit_current_blocked{tenant_id="system"} 0 -// kv_tenant_rate_limit_num_tenants 0 -// kv_tenant_rate_limit_read_bytes_admitted 0 -// kv_tenant_rate_limit_read_bytes_admitted{tenant_id="2"} 0 -// kv_tenant_rate_limit_read_bytes_admitted{tenant_id="system"} 100 -// kv_tenant_rate_limit_read_requests_admitted 0 -// kv_tenant_rate_limit_read_requests_admitted{tenant_id="2"} 0 -// kv_tenant_rate_limit_read_requests_admitted{tenant_id="system"} 0 -// kv_tenant_rate_limit_read_batches_admitted 0 -// kv_tenant_rate_limit_read_batches_admitted{tenant_id="2"} 0 -// kv_tenant_rate_limit_read_batches_admitted{tenant_id="system"} 0 -// kv_tenant_rate_limit_write_bytes_admitted 50 -// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="2"} 50 -// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="system"} 0 -// kv_tenant_rate_limit_write_requests_admitted 0 -// kv_tenant_rate_limit_write_requests_admitted{tenant_id="2"} 0 -// kv_tenant_rate_limit_write_requests_admitted{tenant_id="system"} 0 -// kv_tenant_rate_limit_write_batches_admitted 0 -// kv_tenant_rate_limit_write_batches_admitted{tenant_id="2"} 0 -// kv_tenant_rate_limit_write_batches_admitted{tenant_id="system"} 0 +// metrics +// ---- +// kv_tenant_rate_limit_current_blocked 0 +// kv_tenant_rate_limit_current_blocked{tenant_id="2"} 0 +// kv_tenant_rate_limit_current_blocked{tenant_id="system"} 0 +// kv_tenant_rate_limit_num_tenants 0 +// kv_tenant_rate_limit_read_bytes_admitted 0 +// kv_tenant_rate_limit_read_bytes_admitted{tenant_id="2"} 0 +// kv_tenant_rate_limit_read_bytes_admitted{tenant_id="system"} 100 +// kv_tenant_rate_limit_read_requests_admitted 0 +// kv_tenant_rate_limit_read_requests_admitted{tenant_id="2"} 0 +// kv_tenant_rate_limit_read_requests_admitted{tenant_id="system"} 0 +// kv_tenant_rate_limit_read_batches_admitted 0 +// kv_tenant_rate_limit_read_batches_admitted{tenant_id="2"} 0 +// kv_tenant_rate_limit_read_batches_admitted{tenant_id="system"} 0 +// kv_tenant_rate_limit_write_bytes_admitted 50 +// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="2"} 50 +// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="system"} 0 +// kv_tenant_rate_limit_write_requests_admitted 0 +// kv_tenant_rate_limit_write_requests_admitted{tenant_id="2"} 0 +// kv_tenant_rate_limit_write_requests_admitted{tenant_id="system"} 0 +// kv_tenant_rate_limit_write_batches_admitted 0 +// kv_tenant_rate_limit_write_batches_admitted{tenant_id="2"} 0 +// kv_tenant_rate_limit_write_batches_admitted{tenant_id="system"} 0 // // Or with a regular expression: // -// metrics -// write_bytes_admitted\{tenant_id="2"\} -// ---- -// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="2"} 50 
-// +// metrics +// write_bytes_admitted\{tenant_id="2"\} +// ---- +// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="2"} 50 func (ts *testState) metrics(t *testing.T, d *datadriven.TestData) string { // Compile the input into a regular expression. re, err := regexp.Compile(d.Input) @@ -421,11 +414,10 @@ func (ts *testState) metrics(t *testing.T, d *datadriven.TestData) string { // The following example would wait for there to be two outstanding timers at // 00:00:01.000 and 00:00:02.000. // -// timers -// ---- -// 00:00:01.000 -// 00:00:02.000 -// +// timers +// ---- +// 00:00:01.000 +// 00:00:02.000 func (ts *testState) timers(t *testing.T, d *datadriven.TestData) string { // If we are rewriting the test, just sleep a bit before returning the // timers. @@ -462,11 +454,10 @@ func timesToString(times []time.Time) string { // // For example: // -// get_tenants -// [2, 3, 2] -// ---- -// [2#2, 3#1] -// +// get_tenants +// [2, 3, 2] +// ---- +// [2#2, 3#1] func (ts *testState) getTenants(t *testing.T, d *datadriven.TestData) string { ctx := context.Background() tenantIDs := parseTenantIDs(t, d) @@ -483,11 +474,10 @@ func (ts *testState) getTenants(t *testing.T, d *datadriven.TestData) string { // // For example: // -// release_tenants -// [2, 3] -// ---- -// [2#1] -// +// release_tenants +// [2, 3] +// ---- +// [2#1] func (ts *testState) releaseTenants(t *testing.T, d *datadriven.TestData) string { tenantIDs := parseTenantIDs(t, d) for i := range tenantIDs { @@ -512,13 +502,12 @@ func (ts *testState) releaseTenants(t *testing.T, d *datadriven.TestData) string // // For example: // -// estimate_iops -// readpercentage: 50 -// readsize: 4096 -// writesize: 4096 -// ---- -// Mixed workload (50% reads; 4.0 KiB reads; 4.0 KiB writes): 256 sustained IOPS, 256 burst. -// +// estimate_iops +// readpercentage: 50 +// readsize: 4096 +// writesize: 4096 +// ---- +// Mixed workload (50% reads; 4.0 KiB reads; 4.0 KiB writes): 256 sustained IOPS, 256 burst. func (ts *testState) estimateIOPS(t *testing.T, d *datadriven.TestData) string { var workload struct { ReadPercentage int diff --git a/pkg/kv/kvserver/tscache/interval_skl.go b/pkg/kv/kvserver/tscache/interval_skl.go index 609765243a9d..18f8c6d84e4b 100644 --- a/pkg/kv/kvserver/tscache/interval_skl.go +++ b/pkg/kv/kvserver/tscache/interval_skl.go @@ -53,12 +53,13 @@ const ( // timestamp for that range is called the "gap timestamp". Here is a simplified // representation that would result after these ranges were added to an empty // intervalSkl: -// ["apple", "orange") = 200 -// ["kiwi", "raspberry"] = 100 // -// "apple" "orange" "raspberry" -// keyts=200 keyts=100 keyts=100 -// gapts=200 gapts=100 gapts=0 +// ["apple", "orange") = 200 +// ["kiwi", "raspberry"] = 100 +// +// "apple" "orange" "raspberry" +// keyts=200 keyts=100 keyts=100 +// gapts=200 gapts=100 gapts=0 // // That is, the range from apple (inclusive) to orange (exclusive) has a read // timestamp of 200. The range from orange (inclusive) to raspberry (inclusive) @@ -127,20 +128,18 @@ var initialSklAllocSize = func() int { // uses an arena allocator. Skiplist nodes refer to one another by offset into // the arena rather than by pointer, so the GC has very few objects to track. 
// -// // The data structure can conceptually be thought of as being parameterized over // a key and a value type, such that the key implements a Comparable interface // (see interval.Comparable) and the value implements a Ratchetable interface: // -// type Ratchetable interface { -// Ratchet(other Ratchetable) (changed bool) -// } +// type Ratchetable interface { +// Ratchet(other Ratchetable) (changed bool) +// } // // In other words, if Go supported zero-cost abstractions, this type might look // like: // -// type intervalSkl -// +// type intervalSkl type intervalSkl struct { // rotMutex synchronizes page rotation with all other operations. The read // lock is acquired by the Add and Lookup operations. The write lock is @@ -695,50 +694,50 @@ func (p *sklPage) addNode( // nodes between the previous node and the lookup node, which could change the // choice for the "previous gap value". The solution is two-fold: // -// 1. Add new nodes in two phases - initializing and then initialized. Nodes in -// the initializing state act as a synchronization point between goroutines -// that are adding a particular node and goroutines that are scanning for gap -// values. Scanning goroutines encounter the initializing nodes and are -// forced to ratchet them before continuing. If they fail to ratchet them -// because an arena is full, the nodes must never be initialized so they are -// set to cantInit. This is critical for correctness, because if one of these -// initializing nodes was not ratcheted when encountered during a forward -// scan and later initialized, we could see a ratchet inversion. For example, -// the inversion would occur if: -// - 1: a goroutine is scanning forwards after finding a previous gap value -// from node A in which it plans to initialize node C. -// - 2: node B is created and initialized between node A and node C with a -// larger value than either. -// - 1: the iterator scanning forwards to node C is already past node B when -// it is created. -// - 3: a lookup for the timestamp of node C comes in. Since it's not -// initialized, it uses node B's gap value. -// - 1: the iterator reaches node C and initializes it with node A's gap -// value, which is smaller than node B's. -// - 4: another lookup for the timestamp of node C comes it. It returns the -// nodes newly initialized value, which is smaller than the one it -// reported before. -// Ratcheting initializing nodes when encountered with the current gap value -// avoids this race. +// 1. Add new nodes in two phases - initializing and then initialized. Nodes in +// the initializing state act as a synchronization point between goroutines +// that are adding a particular node and goroutines that are scanning for gap +// values. Scanning goroutines encounter the initializing nodes and are +// forced to ratchet them before continuing. If they fail to ratchet them +// because an arena is full, the nodes must never be initialized so they are +// set to cantInit. This is critical for correctness, because if one of these +// initializing nodes was not ratcheted when encountered during a forward +// scan and later initialized, we could see a ratchet inversion. For example, +// the inversion would occur if: +// - 1: a goroutine is scanning forwards after finding a previous gap value +// from node A in which it plans to initialize node C. +// - 2: node B is created and initialized between node A and node C with a +// larger value than either. 
+// - 1: the iterator scanning forwards to node C is already past node B when +// it is created. +// - 3: a lookup for the timestamp of node C comes in. Since it's not +// initialized, it uses node B's gap value. +// - 1: the iterator reaches node C and initializes it with node A's gap +// value, which is smaller than node B's. +// - 4: another lookup for the timestamp of node C comes it. It returns the +// nodes newly initialized value, which is smaller than the one it +// reported before. +// Ratcheting initializing nodes when encountered with the current gap value +// avoids this race. // -// However, only a goroutine that saw a node in an uninitialized state before -// scanning backwards can switch it from initializing to initialized. This -// enforces a "happens-before" relationship between the creation of a node -// and the discovery of the gap value that is used when initializing it. If -// any goroutine was able to initialize a node, then this relationship would -// not exist and we could experience races where a newly inserted node A's -// call to ensureFloorValue could come before the insertion of a node B, but -// node B could be initialized with a gap value discovered before the -// insertion of node A. For more on this, see the discussion in #19672. +// However, only a goroutine that saw a node in an uninitialized state before +// scanning backwards can switch it from initializing to initialized. This +// enforces a "happens-before" relationship between the creation of a node +// and the discovery of the gap value that is used when initializing it. If +// any goroutine was able to initialize a node, then this relationship would +// not exist and we could experience races where a newly inserted node A's +// call to ensureFloorValue could come before the insertion of a node B, but +// node B could be initialized with a gap value discovered before the +// insertion of node A. For more on this, see the discussion in #19672. // -// 2. After the gap value of the first initialized node with a key less than or -// equal to the desired key has been found, the scanning goroutine will scan -// forwards until it reaches the original key. It will ratchet any -// uninitialized nodes along the way and inherit the gap value from them as -// it goes. By the time it reaches the original key, it has a valid gap -// value, which we have called the "previous gap value". At this point, if -// the node at key is uninitialized, the node can be initialized with the -// "previous gap value". +// 2. After the gap value of the first initialized node with a key less than or +// equal to the desired key has been found, the scanning goroutine will scan +// forwards until it reaches the original key. It will ratchet any +// uninitialized nodes along the way and inherit the gap value from them as +// it goes. By the time it reaches the original key, it has a valid gap +// value, which we have called the "previous gap value". At this point, if +// the node at key is uninitialized, the node can be initialized with the +// "previous gap value". // // It is an error to call ensureInitialized on a key without a node. When // finished, the iterator will be positioned the same as if it.Seek(key) had @@ -825,13 +824,13 @@ func (p *sklPage) getMaxTimestamp() hlc.Timestamp { // to a larger value. 
This provides the guarantee that the following relation // holds, regardless of the value of x: // -// x.LessEq(makeRatchetingTime(x).get()) +// x.LessEq(makeRatchetingTime(x).get()) // // It also provides the guarantee that if the synthetic flag is set on the // initial timestamp, then this flag is set on the resulting Timestamp. So the // following relation is guaranteed to hold, regardless of the value of x: // -// x.IsFlagSet(SYNTHETIC) == makeRatchetingTime(x).get().IsFlagSet(SYNTHETIC) +// x.IsFlagSet(SYNTHETIC) == makeRatchetingTime(x).get().IsFlagSet(SYNTHETIC) // // Compressed ratchetingTime values compare such that taking the maximum of any // two ratchetingTime values and converting that back to a Timestamp is always @@ -839,12 +838,13 @@ func (p *sklPage) getMaxTimestamp() hlc.Timestamp { // method. So the following relation is guaranteed to hold, regardless of the // value of x or y: // -// z := max(makeRatchetingTime(x), makeRatchetingTime(y)).get() -// x.Forward(y).LessEq(z) +// z := max(makeRatchetingTime(x), makeRatchetingTime(y)).get() +// x.Forward(y).LessEq(z) // // Bit layout (LSB to MSB): -// bits 0: inverted synthetic flag -// bits 1 - 63: upper 63 bits of wall time +// +// bits 0: inverted synthetic flag +// bits 1 - 63: upper 63 bits of wall time type ratchetingTime int64 func makeRatchetingTime(ts hlc.Timestamp) ratchetingTime { @@ -1068,13 +1068,13 @@ func (p *sklPage) maxInRange(it *arenaskl.Iterator, from, to []byte, opt rangeOp // During forward iteration, if another goroutine inserts a new gap node in the // interval between the previous node and the original key, then either: // -// 1. The forward iteration finds it and looks up its gap value. That node's gap -// value now becomes the new "previous gap value", and iteration continues. +// 1. The forward iteration finds it and looks up its gap value. That node's gap +// value now becomes the new "previous gap value", and iteration continues. // -// 2. The new node is created after the iterator has move past its position. As -// part of node creation, the creator had to scan backwards to find the gap -// value of the previous node. It is guaranteed to find a gap value that is -// >= the gap value found by the original goroutine. +// 2. The new node is created after the iterator has move past its position. As +// part of node creation, the creator had to scan backwards to find the gap +// value of the previous node. It is guaranteed to find a gap value that is +// >= the gap value found by the original goroutine. // // This means that no matter what gets inserted, or when it gets inserted, the // scanning goroutine is guaranteed to end up with a value that will never diff --git a/pkg/kv/kvserver/txn_recovery_integration_test.go b/pkg/kv/kvserver/txn_recovery_integration_test.go index 74c083267345..5bd3a3154ada 100644 --- a/pkg/kv/kvserver/txn_recovery_integration_test.go +++ b/pkg/kv/kvserver/txn_recovery_integration_test.go @@ -203,20 +203,19 @@ func TestTxnRecoveryFromStaging(t *testing.T) { // transaction. The test contains a subtest for each of the combinations of the // following boolean options: // -// - pushAbort: configures whether or not the high-priority operation is a +// - pushAbort: configures whether or not the high-priority operation is a // read (false) or a write (true), which dictates the kind of push // operation dispatched against the staging transaction. 
// -// - newEpoch: configures whether or not the staging transaction wrote the +// - newEpoch: configures whether or not the staging transaction wrote the // intent which the high-priority operation conflicts with at a higher // epoch than it is staged at. If true, the staging transaction is not // implicitly committed. // -// - newTimestamp: configures whether or not the staging transaction wrote the +// - newTimestamp: configures whether or not the staging transaction wrote the // intent which the high-priority operation conflicts with at a higher // timestamp than it is staged at. If true, the staging transaction is not // implicitly committed. -// func TestTxnRecoveryFromStagingWithHighPriority(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvserver/uncertainty/compute.go b/pkg/kv/kvserver/uncertainty/compute.go index 667972468800..5b8b003548ab 100644 --- a/pkg/kv/kvserver/uncertainty/compute.go +++ b/pkg/kv/kvserver/uncertainty/compute.go @@ -32,9 +32,10 @@ import ( // timestamp. // // If both these conditions hold: -// * A transaction already has an observed timestamp value for a node -// * That node was not the leaseholder for some or all of the range as of the +// - A transaction already has an observed timestamp value for a node +// - That node was not the leaseholder for some or all of the range as of the // time of the observed timestamp, but it is now. +// // Then the transaction's observed timestamp is not (entirely) respected when // computing a local uncertainty limit. // @@ -59,17 +60,16 @@ import ( // // A similar hazard applies to range merges. // -// 1. put(k2 on n2, r2); gateway chooses t=1.0 -// 2. begin; read(k on n1, r1); gateway chooses t=0.98 -// 3. pick up observed timestamp for n1 of t=0.99 -// 4. r1 merged right-hand neighbor r2 @ t=1.1 -// 5. read(k2) on joint range at ReadTimestamp=0.98 should get -// ReadWithinUncertaintyInterval because of the write in step 1, so -// even though we observed n1's timestamp in step 3 we must expand -// the uncertainty interval to the range merge freeze time, which -// is guaranteed to be greater than any write which occurred on the -// right-hand side. -// +// 1. put(k2 on n2, r2); gateway chooses t=1.0 +// 2. begin; read(k on n1, r1); gateway chooses t=0.98 +// 3. pick up observed timestamp for n1 of t=0.99 +// 4. r1 merged right-hand neighbor r2 @ t=1.1 +// 5. read(k2) on joint range at ReadTimestamp=0.98 should get +// ReadWithinUncertaintyInterval because of the write in step 1, so +// even though we observed n1's timestamp in step 3 we must expand +// the uncertainty interval to the range merge freeze time, which +// is guaranteed to be greater than any write which occurred on the +// right-hand side. func ComputeInterval( h *roachpb.Header, status kvserverpb.LeaseStatus, maxOffset time.Duration, ) Interval { diff --git a/pkg/kv/kvserver/uncertainty/doc.go b/pkg/kv/kvserver/uncertainty/doc.go index c4b431b871c5..7ce5d93248dd 100644 --- a/pkg/kv/kvserver/uncertainty/doc.go +++ b/pkg/kv/kvserver/uncertainty/doc.go @@ -27,7 +27,7 @@ import ( // D0 ———————————————————————————————————————————————— // -// MVCCKey +// # MVCCKey // // Each value in CockroachDB is stored at an associated versioned key. This key // is distinguished from roachpb.Key with the addition of a "version" timestamp. 
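The # MVCCKey hunk above describes each value as living at a versioned key: a plain roachpb.Key plus a "version" timestamp. Below is a minimal, self-contained Go sketch of that idea; versionedKey and less are illustrative stand-ins, not the actual storage.MVCCKey type or its on-disk encoding, and the newest-first ordering of versions for the same key is modeled only at a high level.

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// versionedKey is an illustrative stand-in for storage.MVCCKey: a plain
// roachpb.Key plus a "version" timestamp (only the wall time is modeled here).
type versionedKey struct {
	Key      []byte
	WallTime int64
}

// less orders keys the way an MVCC scan wants to see them: by key ascending,
// and for versions of the same key, by timestamp descending (newest first).
func less(a, b versionedKey) bool {
	if c := bytes.Compare(a.Key, b.Key); c != 0 {
		return c < 0
	}
	return a.WallTime > b.WallTime
}

func main() {
	keys := []versionedKey{
		{Key: []byte("b"), WallTime: 10},
		{Key: []byte("a"), WallTime: 20},
		{Key: []byte("a"), WallTime: 30},
	}
	sort.Slice(keys, func(i, j int) bool { return less(keys[i], keys[j]) })
	for _, k := range keys {
		fmt.Printf("%s@%d\n", k.Key, k.WallTime)
	}
	// Prints a@30, a@20, b@10: the newest version of "a" sorts first.
}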
@@ -45,7 +45,7 @@ var D0 = /* storage.MVCCKey */ struct { // D1 ———————————————————————————————————————————————— // -// MVCCValue +// # MVCCValue // // At and below the MVCC layer, each roachpb.Value as wrapped with a header that // contains MVCC-level metadata. One piece of metadata stored in this header is @@ -58,14 +58,14 @@ var D0 = /* storage.MVCCKey */ struct { // // When a versioned key-value is encountered by a transaction while reading, // there are three possible outcomes: -// 1. the key's value is visible to the reader and is returned. -// 2. the key's value is not visible to the reader but is determined to have -// definitely been written concurrently with reader's transaction, so it is -// ignored and not returned. -// 3. the key's value is not visible to the reader but is determined to have -// been possibly written before the reader's transaction, so the reader must -// perform an uncertainty restart and later return the value to ensure real- -// time ordering guarantees. +// 1. the key's value is visible to the reader and is returned. +// 2. the key's value is not visible to the reader but is determined to have +// definitely been written concurrently with reader's transaction, so it is +// ignored and not returned. +// 3. the key's value is not visible to the reader but is determined to have +// been possibly written before the reader's transaction, so the reader must +// perform an uncertainty restart and later return the value to ensure real- +// time ordering guarantees. // // We call the differentiation between outcomes 2 and 3 "uncertainty". The rest // of this package explores the mechanisms used to guarantee real-time ordering @@ -83,7 +83,7 @@ var D1 = /* storage.MVCCValue */ struct { // D2 ———————————————————————————————————————————————— // -// Transaction.GlobalUncertaintyLimit +// # Transaction.GlobalUncertaintyLimit // // A transaction's global uncertainty limit is the inclusive upper bound of its // uncertainty interval. The value is set to the transaction's initial timestamp @@ -95,7 +95,7 @@ var D2 = roachpb.Transaction{}.GlobalUncertaintyLimit // D3 ———————————————————————————————————————————————— // -// Interval +// # Interval // // When the transaction sends a request to a replica, an uncertainty interval // is computed. This interval consists of a global and a local component. The @@ -106,7 +106,7 @@ var D3 = Interval{} // D4 ———————————————————————————————————————————————— // -// ReadWithinUncertaintyIntervalError +// # ReadWithinUncertaintyIntervalError // // While reading, if a transaction encounters a value above its read timestamp // but equal to or below its global limit, it triggers a read within uncertainty @@ -122,7 +122,7 @@ var D4 = roachpb.ReadWithinUncertaintyIntervalError{} // D5 ———————————————————————————————————————————————— // -// ObservedTimestamp +// # ObservedTimestamp // // An observed timestamp is a combination of a NodeID and a Timestamp. The // timestamp is said to have been "observed" because it was pulled from the @@ -136,7 +136,7 @@ var D5 = roachpb.ObservedTimestamp{} // D6 ———————————————————————————————————————————————— // -// Transaction.UpdateObservedTimestamp +// # Transaction.UpdateObservedTimestamp // // A transaction collects observed timestamps as it visits nodes in the cluster // when performing reads and writes. 
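Taken together, the MVCCValue, Interval, and ReadWithinUncertaintyIntervalError hunks above reduce to a three-way classification of each value a reader encounters. The following is a hedged, self-contained Go sketch of that classification using bare int64 timestamps; classify and its parameters are illustrative names only, not the actual MVCC read path, and the local uncertainty limit refinement is noted only in a comment.

package main

import "fmt"

// outcome mirrors the three cases the doc comment describes for a value
// encountered during an MVCC read.
type outcome int

const (
	visible    outcome = iota // returned to the reader
	concurrent                // definitely written concurrently; ignored
	uncertain                 // possibly written before the txn; forces a restart
)

// classify is an illustrative sketch, not the real MVCC read path. readTS is
// the transaction's read timestamp and globalLimit its global uncertainty
// limit; valueTS is the version timestamp of the encountered value.
func classify(valueTS, readTS, globalLimit int64) outcome {
	switch {
	case valueTS <= readTS:
		return visible
	case valueTS > globalLimit:
		return concurrent
	default:
		// readTS < valueTS <= globalLimit: the write may have happened in
		// real time before the transaction began, so it must be treated as
		// uncertain. (The local uncertainty limit, derived from an observed
		// timestamp, can shrink this window further; omitted here.)
		return uncertain
	}
}

func main() {
	fmt.Println(classify(5, 10, 15))  // 0: visible
	fmt.Println(classify(20, 10, 15)) // 1: concurrent
	fmt.Println(classify(12, 10, 15)) // 2: uncertain
}

In the real system, landing in the uncertain case surfaces as a ReadWithinUncertaintyIntervalError, after which the read is retried at a higher timestamp.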
@@ -144,7 +144,7 @@ var D6 = (&roachpb.Transaction{}).UpdateObservedTimestamp // D7 ———————————————————————————————————————————————— // -// Transaction.ObservedTimestamps +// # Transaction.ObservedTimestamps // // The observed timestamps are collected in a list on the transaction proto. The // purpose of this list is to avoid uncertainty related restarts which occur @@ -155,7 +155,7 @@ var D6 = (&roachpb.Transaction{}).UpdateObservedTimestamp // timestamp to determine whether the value was written before or after the // clock observation by the reader. // -// Meaning +// # Meaning // // Morally speaking, having an entry for a node in this list means that this // node has been visited before, and that no more uncertainty restarts are @@ -173,7 +173,7 @@ var D6 = (&roachpb.Transaction{}).UpdateObservedTimestamp // timestamp that is at least high as our entry in the list for node A, so no // future operation on node A will be uncertain. // -// Correctness +// # Correctness // // Thus, expressed properly, we can say that when a node has been read from // successfully before by a transaction, uncertainty for values written by a @@ -185,10 +185,10 @@ var D6 = (&roachpb.Transaction{}).UpdateObservedTimestamp // timestamps than the clock reading we observe. This implies the following // property: // -// Any writes that the transaction may later see written by leaseholders on -// this node at higher local timestamps than the observed timestamp could not -// have taken place causally before this transaction and can be ignored for -// the purposes of uncertainty. +// Any writes that the transaction may later see written by leaseholders on +// this node at higher local timestamps than the observed timestamp could not +// have taken place causally before this transaction and can be ignored for +// the purposes of uncertainty. // // There are two invariants necessary for this property to hold: // @@ -214,7 +214,7 @@ var D6 = (&roachpb.Transaction{}).UpdateObservedTimestamp // invariant holds for all leaseholders, given that a Range's initial // leaseholder assumes responsibility for an empty range with no writes. // -// Usage +// # Usage // // The property ensures that when this list holds a corresponding entry for the // node who owns the lease that the current request is executing under, we can @@ -243,7 +243,7 @@ var D6 = (&roachpb.Transaction{}).UpdateObservedTimestamp // we may add that to the list, which eliminates read uncertainty for reads on // that node. // -// Follower Reads +// # Follower Reads // // If the replica serving a transaction's read is not the leaseholder for its // range, an observed timestamp pulled from the follower node's clock has no @@ -263,7 +263,7 @@ var D7 = roachpb.Transaction{}.ObservedTimestamps // D8 ———————————————————————————————————————————————— // -// TimestampFromServerClock +// # TimestampFromServerClock // // Non-transactional requests that defer their timestamp allocation to the // leaseholder of their (single) range also have uncertainty intervals, which @@ -294,78 +294,78 @@ var D7 = roachpb.Transaction{}.ObservedTimestamps // "no" for the following reasons, so they cannot forgo the use of uncertainty // interval: // -// 1. the request timestamp is allocated before consulting the replica's lease. -// This means that there are times when the replica is not the leaseholder at -// the point of timestamp allocation, and only becomes the leaseholder later. 
-// In such cases, the timestamp assigned to the request is not guaranteed to -// be greater than the local timestamp of all writes served by the range at -// the time of allocation. This is true despite invariants 1 & 2 from above, -// because the replica allocating the timestamp is not yet the leaseholder. -// -// In cases where the replica that assigned the non-transactional request's -// timestamp takes over as the leaseholder after the timestamp allocation, we -// expect minimumLocalLimitForLeaseholder to forward the local uncertainty -// limit above TimestampFromServerClock, to the lease start time. -// -// For example, consider the following series of events: -// - client writes k = v1 -// - leaseholder writes v1 at ts = 100 -// - client receives ack for write -// - client later wants to read k using a non-txn request -// - follower replica with slower clock receives non-txn request -// - follower replica assigns request ts = 95 -// - lease transferred to follower replica with lease start time = 101 -// - non-txn request must use 101 as local limit of uncertainty interval to -// ensure that it observes k = v1 in its uncertainty interval, performs a -// server-side retry, bumps its read timestamp, and returns k = v1. Failure -// to do so would be a stale read. -// -// 2. even if the replica's lease is stable and the timestamp is assigned to the -// non-transactional request by the leaseholder, the assigned clock reading -// only reflects the local timestamp of all of the writes served by the -// leaseholder (and previous leaseholders) thus far. This clock reading is -// not guaranteed to lead the commit timestamp of all of these writes, -// especially if they are committed remotely and resolved after the request -// has received its clock reading but before the request begins evaluating. -// -// As a result, the non-transactional request needs an uncertainty interval -// with a global uncertainty limit far enough in advance of the leaseholder's -// local HLC clock to ensure that it considers any value that was part of a -// transaction which could have committed before the request was received by -// the leaseholder to be uncertain. Concretely, the non-transactional request -// needs to consider values of the following form to be uncertain: -// -// local_timestamp < local_limit && commit_timestamp < global_limit -// -// The value that the non-transactional request is observing may have been -// written on the local leaseholder at time 10, its transaction may have been -// committed remotely at time 20, acknowledged, then the non-transactional -// request may have begun and received a timestamp of 15 from the local -// leaseholder, then finally the value may have been resolved asynchronously -// and moved to timestamp 20 (local_timestamp: 10, commit_timestamp: 20). The -// failure of the non-transactional request to observe this value would be a -// stale read. -// -// For example, consider the following series of events: -// - client begins a txn and is assigned provisional commit timestamp = 10 -// - client's txn performs a Put(k, v1) -// - leaseholder serves Put(k, v1), lays down intent at local_timestamp = 10 -// - client's txn performs a write elsewhere and hits a WriteTooOldError -// that bumps its provisional commit timestamp to 20 -// - client's txn refreshes to ts = 20. 
This notably happens without -// involvement of the leaseholder that served the Put (this is at the heart -// of #36431), so that leaseholder's clock is not updated -// - client's txn commits remotely and client receives the acknowledgment -// - client later initiates non-txn read of k -// - leaseholder assigns read timestamp ts = 15 -// - asynchronous intent resolution resolves the txn's intent at k, moving v1 -// to ts = 20 in the process -// - non-txn request must use an uncertainty interval that extends past 20 -// to ensure that it observes k = v1 in uncertainty interval, performs a -// server-side retry, bumps its read timestamp, and returns k = v1. Failure -// to do so would be a stale read. -// -// TODO(nvanbenschoten): add another reason here once we address #73292. +// 1. the request timestamp is allocated before consulting the replica's lease. +// This means that there are times when the replica is not the leaseholder at +// the point of timestamp allocation, and only becomes the leaseholder later. +// In such cases, the timestamp assigned to the request is not guaranteed to +// be greater than the local timestamp of all writes served by the range at +// the time of allocation. This is true despite invariants 1 & 2 from above, +// because the replica allocating the timestamp is not yet the leaseholder. +// +// In cases where the replica that assigned the non-transactional request's +// timestamp takes over as the leaseholder after the timestamp allocation, we +// expect minimumLocalLimitForLeaseholder to forward the local uncertainty +// limit above TimestampFromServerClock, to the lease start time. +// +// For example, consider the following series of events: +// - client writes k = v1 +// - leaseholder writes v1 at ts = 100 +// - client receives ack for write +// - client later wants to read k using a non-txn request +// - follower replica with slower clock receives non-txn request +// - follower replica assigns request ts = 95 +// - lease transferred to follower replica with lease start time = 101 +// - non-txn request must use 101 as local limit of uncertainty interval to +// ensure that it observes k = v1 in its uncertainty interval, performs a +// server-side retry, bumps its read timestamp, and returns k = v1. Failure +// to do so would be a stale read. +// +// 2. even if the replica's lease is stable and the timestamp is assigned to the +// non-transactional request by the leaseholder, the assigned clock reading +// only reflects the local timestamp of all of the writes served by the +// leaseholder (and previous leaseholders) thus far. This clock reading is +// not guaranteed to lead the commit timestamp of all of these writes, +// especially if they are committed remotely and resolved after the request +// has received its clock reading but before the request begins evaluating. +// +// As a result, the non-transactional request needs an uncertainty interval +// with a global uncertainty limit far enough in advance of the leaseholder's +// local HLC clock to ensure that it considers any value that was part of a +// transaction which could have committed before the request was received by +// the leaseholder to be uncertain. 
Concretely, the non-transactional request +// needs to consider values of the following form to be uncertain: +// +// local_timestamp < local_limit && commit_timestamp < global_limit +// +// The value that the non-transactional request is observing may have been +// written on the local leaseholder at time 10, its transaction may have been +// committed remotely at time 20, acknowledged, then the non-transactional +// request may have begun and received a timestamp of 15 from the local +// leaseholder, then finally the value may have been resolved asynchronously +// and moved to timestamp 20 (local_timestamp: 10, commit_timestamp: 20). The +// failure of the non-transactional request to observe this value would be a +// stale read. +// +// For example, consider the following series of events: +// - client begins a txn and is assigned provisional commit timestamp = 10 +// - client's txn performs a Put(k, v1) +// - leaseholder serves Put(k, v1), lays down intent at local_timestamp = 10 +// - client's txn performs a write elsewhere and hits a WriteTooOldError +// that bumps its provisional commit timestamp to 20 +// - client's txn refreshes to ts = 20. This notably happens without +// involvement of the leaseholder that served the Put (this is at the heart +// of #36431), so that leaseholder's clock is not updated +// - client's txn commits remotely and client receives the acknowledgment +// - client later initiates non-txn read of k +// - leaseholder assigns read timestamp ts = 15 +// - asynchronous intent resolution resolves the txn's intent at k, moving v1 +// to ts = 20 in the process +// - non-txn request must use an uncertainty interval that extends past 20 +// to ensure that it observes k = v1 in uncertainty interval, performs a +// server-side retry, bumps its read timestamp, and returns k = v1. Failure +// to do so would be a stale read. +// +// TODO(nvanbenschoten): add another reason here once we address #73292. // // Convenient, because non-transactional requests are always scoped to a // single-range, those that hit uncertainty errors can always retry on the @@ -375,7 +375,7 @@ var D8 = roachpb.Header{}.TimestampFromServerClock // D9 ———————————————————————————————————————————————— // -// ComputeInterval +// # ComputeInterval // // Observed timestamps allow transactions to avoid uncertainty related restarts // because they allow transactions to bound their uncertainty limit when reading diff --git a/pkg/kv/range_lookup.go b/pkg/kv/range_lookup.go index 6facaebdc972..b4a618d37827 100644 --- a/pkg/kv/range_lookup.go +++ b/pkg/kv/range_lookup.go @@ -45,19 +45,18 @@ import ( // The "Range Metadata Key" for a range is built by appending the end key of the // range to the respective meta prefix. // -// // It is often useful to think of Cockroach's ranges as existing in a three // level tree: // -// [/meta1/,/meta1/max) <-- always one range, gossipped, start here! -// | -// ----------------------- -// | | -// [/meta2/,/meta2/m) [/meta2/m,/meta2/max) -// | | -// --------- --------- -// | | | | -// [a,g) [g,m) [m,s) [s,max) <- user data +// [/meta1/,/meta1/max) <-- always one range, gossipped, start here! +// | +// ----------------------- +// | | +// [/meta2/,/meta2/m) [/meta2/m,/meta2/max) +// | | +// --------- --------- +// | | | | +// [a,g) [g,m) [m,s) [s,max) <- user data // // In this analogy, each node (range) contains a number of RangeDescriptors, and // these descriptors act as pointers to the location of its children. 
So given a @@ -68,77 +67,78 @@ import ( // its parent range, we know that the descriptor we want is the first descriptor // to the right of this meta key in the parent's ordered set of keys. // -// // Let's look at a few examples that demonstrate how RangeLookup performs this // task of finding a user RangeDescriptors from cached meta2 descriptors: // // Ex. 1: -// Meta2 Ranges: [/meta2/a, /meta2/z) -// User Ranges: [a, f) [f, p), [p, z) -// 1.a: RangeLookup(key=f) -// In this case, we want to look up the range descriptor for the range [f, p) -// because "f" is in that range. Remember that this descriptor will be stored -// at "/meta2/p". Of course, when we're performing the RangeLookup, we don't -// actually know what the bounds of this range are or where exactly it's -// stored (that's what we're looking up!), so all we have to go off of is the -// lookup key. So, we first determine the meta key for the lookup key using -// RangeMetaKey, which is simply "/meta2/f". We then construct the scan bounds -// for this key using MetaScanBounds. This scan bound will be -// [/meta2/f.Next(),/meta2/max). The reason that this scan doesn't start at -// "/meta2/f" is because if this key is the start key of a range (like it is -// in this example!), the previous range descriptor will be stored at that -// key. We then issue a forward ScanRequest over this range. Since we're -// assuming we already cached the meta2 range that contains this span of keys, -// we send the request directly to that range's replica (if we didn't have -// this cached, the process would recurse to lookup the meta2 range -// descriptor). We then find that the first KV pair we see during the scan is -// at "/meta2/p". This is our desired range descriptor. -// 1.b: RangeLookup(key=m) -// This case is similar. We construct a scan for this key "m" from -// [/meta2/m.Next(),/meta2/max) and everything works the same as before. -// 1.b: RangeLookup(key=p) -// Here, we're looking for the descriptor for the range [p, z), because key "p" -// is included in that range, but not [f, p). We scan with bounds of -// [/meta2/p.Next(),/meta2/max) and everything works as expected. +// +// Meta2 Ranges: [/meta2/a, /meta2/z) +// User Ranges: [a, f) [f, p), [p, z) +// 1.a: RangeLookup(key=f) +// In this case, we want to look up the range descriptor for the range [f, p) +// because "f" is in that range. Remember that this descriptor will be stored +// at "/meta2/p". Of course, when we're performing the RangeLookup, we don't +// actually know what the bounds of this range are or where exactly it's +// stored (that's what we're looking up!), so all we have to go off of is the +// lookup key. So, we first determine the meta key for the lookup key using +// RangeMetaKey, which is simply "/meta2/f". We then construct the scan bounds +// for this key using MetaScanBounds. This scan bound will be +// [/meta2/f.Next(),/meta2/max). The reason that this scan doesn't start at +// "/meta2/f" is because if this key is the start key of a range (like it is +// in this example!), the previous range descriptor will be stored at that +// key. We then issue a forward ScanRequest over this range. Since we're +// assuming we already cached the meta2 range that contains this span of keys, +// we send the request directly to that range's replica (if we didn't have +// this cached, the process would recurse to lookup the meta2 range +// descriptor). We then find that the first KV pair we see during the scan is +// at "/meta2/p". This is our desired range descriptor. 
+// 1.b: RangeLookup(key=m) +// This case is similar. We construct a scan for this key "m" from +// [/meta2/m.Next(),/meta2/max) and everything works the same as before. +// 1.b: RangeLookup(key=p) +// Here, we're looking for the descriptor for the range [p, z), because key "p" +// is included in that range, but not [f, p). We scan with bounds of +// [/meta2/p.Next(),/meta2/max) and everything works as expected. // // Ex. 2: -// Meta2 Ranges: [/meta2/a, /meta2/m) [/meta2/m, /meta2/z) -// User Ranges: [a, f) [f, p), [p, z) -// 2.a: RangeLookup(key=n) -// In this case, we want to look up the range descriptor for the range [f, p) -// because "n" is in that range. Remember that this descriptor will be stored -// at "/meta2/p", which in this case is on the second meta2 range. So, we -// construct the scan bounds of [/meta2/n.Next(),/meta2/max), send this scan -// to the second meta2 range, and find that the first descriptor found is the -// desired descriptor. -// 2.b: RangeLookup(key=g) -// This is where things get a little tricky. As usual, we construct scan -// bounds of [/meta2/g.Next(),/meta2/max). However, this scan will be routed -// to the first meta2 range. It will scan forward and notice that no -// descriptors are stored between [/meta2/g.Next(),/meta2/m). We then rely on -// DistSender to continue this scan onto the next meta2 range since the result -// from the first meta2 range will be empty. Once on the next meta2 range, -// we'll find the desired descriptor at "/meta2/p". +// +// Meta2 Ranges: [/meta2/a, /meta2/m) [/meta2/m, /meta2/z) +// User Ranges: [a, f) [f, p), [p, z) +// 2.a: RangeLookup(key=n) +// In this case, we want to look up the range descriptor for the range [f, p) +// because "n" is in that range. Remember that this descriptor will be stored +// at "/meta2/p", which in this case is on the second meta2 range. So, we +// construct the scan bounds of [/meta2/n.Next(),/meta2/max), send this scan +// to the second meta2 range, and find that the first descriptor found is the +// desired descriptor. +// 2.b: RangeLookup(key=g) +// This is where things get a little tricky. As usual, we construct scan +// bounds of [/meta2/g.Next(),/meta2/max). However, this scan will be routed +// to the first meta2 range. It will scan forward and notice that no +// descriptors are stored between [/meta2/g.Next(),/meta2/m). We then rely on +// DistSender to continue this scan onto the next meta2 range since the result +// from the first meta2 range will be empty. Once on the next meta2 range, +// we'll find the desired descriptor at "/meta2/p". // // Ex. 3: -// Meta2 Ranges: [/meta2/a, /meta2/m) [/meta2/m, /meta2/z) -// User Ranges: [a, f) [f, m), [m,s) [p, z) -// 3.a: RangeLookup(key=g) -// This is a little confusing, but actually behaves the exact same way at 2.b. -// Notice that the descriptor for [f, m) is actually stored on the second -// meta2 range! So the lookup scan will start on the first meta2 range and -// continue onto the second before finding the desired descriptor at /meta2/m. -// This is an unfortunate result of us storing RangeDescriptors at -// RangeMetaKey(desc.EndKey) instead of RangeMetaKey(desc.StartKey) even -// though our ranges are [inclusive,exclusive). Still everything works if we -// let DistSender do its job when scanning over the meta2 range. // -// See #16266 and #17565 for further discussion. Notably, it is not possible -// to pick meta2 boundaries such that we will never run into this issue. 
The -// only way to avoid this completely would be to store RangeDescriptors at -// RangeMetaKey(desc.StartKey) and only allow meta2 split boundaries at -// RangeMetaKey(existingSplitBoundary) +// Meta2 Ranges: [/meta2/a, /meta2/m) [/meta2/m, /meta2/z) +// User Ranges: [a, f) [f, m), [m,s) [p, z) +// 3.a: RangeLookup(key=g) +// This is a little confusing, but actually behaves the exact same way at 2.b. +// Notice that the descriptor for [f, m) is actually stored on the second +// meta2 range! So the lookup scan will start on the first meta2 range and +// continue onto the second before finding the desired descriptor at /meta2/m. +// This is an unfortunate result of us storing RangeDescriptors at +// RangeMetaKey(desc.EndKey) instead of RangeMetaKey(desc.StartKey) even +// though our ranges are [inclusive,exclusive). Still everything works if we +// let DistSender do its job when scanning over the meta2 range. // +// See #16266 and #17565 for further discussion. Notably, it is not possible +// to pick meta2 boundaries such that we will never run into this issue. The +// only way to avoid this completely would be to store RangeDescriptors at +// RangeMetaKey(desc.StartKey) and only allow meta2 split boundaries at +// RangeMetaKey(existingSplitBoundary) // // Lookups for range metadata keys usually want to perform reads at the // READ_UNCOMMITTED read consistency level read in order to observe intents as diff --git a/pkg/kv/txn.go b/pkg/kv/txn.go index 33a57b7dbb18..990cc8eb0185 100644 --- a/pkg/kv/txn.go +++ b/pkg/kv/txn.go @@ -105,13 +105,14 @@ type Txn struct { // transaction (including stopping the heartbeat loop). // // gatewayNodeID: If != 0, this is the ID of the node on whose behalf this -// transaction is running. Normally this is the current node, but in the case -// of Txns created on remote nodes by DistSQL this will be the gateway. -// If 0 is passed, then no value is going to be filled in the batches sent -// through this txn. This will have the effect that the DistSender will fill -// in the batch with the current node's ID. -// If the gatewayNodeID is set and this is a root transaction, we optimize -// away any clock uncertainty for our own node, as our clock is accessible. +// +// transaction is running. Normally this is the current node, but in the case +// of Txns created on remote nodes by DistSQL this will be the gateway. +// If 0 is passed, then no value is going to be filled in the batches sent +// through this txn. This will have the effect that the DistSender will fill +// in the batch with the current node's ID. +// If the gatewayNodeID is set and this is a root transaction, we optimize +// away any clock uncertainty for our own node, as our clock is accessible. // // See also db.NewTxn(). func NewTxn(ctx context.Context, db *DB, gatewayNodeID roachpb.NodeID) *Txn { @@ -422,8 +423,8 @@ func (txn *Txn) NewBatch() *Batch { // Get retrieves the value for a key, returning the retrieved key/value or an // error. It is not considered an error for the key to not exist. // -// r, err := txn.Get("a") -// // string(r.Key) == "a" +// r, err := txn.Get("a") +// // string(r.Key) == "a" // // key can be either a byte slice or a string. func (txn *Txn) Get(ctx context.Context, key interface{}) (KeyValue, error) { @@ -436,8 +437,8 @@ func (txn *Txn) Get(ctx context.Context, key interface{}) (KeyValue, error) { // or an error. An unreplicated, exclusive lock is acquired on the key, if it // exists. It is not considered an error for the key to not exist. 
// -// r, err := txn.GetForUpdate("a") -// // string(r.Key) == "a" +// r, err := txn.GetForUpdate("a") +// // string(r.Key) == "a" // // key can be either a byte slice or a string. func (txn *Txn) GetForUpdate(ctx context.Context, key interface{}) (KeyValue, error) { diff --git a/pkg/multitenant/tenantcostmodel/model.go b/pkg/multitenant/tenantcostmodel/model.go index 402896837982..1f0b5cbe4932 100644 --- a/pkg/multitenant/tenantcostmodel/model.go +++ b/pkg/multitenant/tenantcostmodel/model.go @@ -29,21 +29,22 @@ type RU float64 // // The cost model takes into account the following activities: // -// - KV "read" and "write" batches. KV batches that read or write data have a -// base cost, a per-request cost, and a per-byte cost. Specifically, the -// cost of a read batch is: -// RUs = KVReadBatch + -// * KVReadRequest + -// * KVReadByte -// The cost of a write batch is: -// RUs = KVWriteBatch + -// * KVWriteRequest + -// * KVWriteByte +// - KV "read" and "write" batches. KV batches that read or write data have a +// base cost, a per-request cost, and a per-byte cost. Specifically, the +// cost of a read batch is: +// RUs = KVReadBatch + +// * KVReadRequest + +// * KVReadByte +// The cost of a write batch is: +// RUs = KVWriteBatch + +// * KVWriteRequest + +// * KVWriteByte // -// - CPU usage on the tenant's SQL pods. -// - Writes to external storage services such as S3. -// - Count of bytes returned from SQL to the client (network egress). +// - CPU usage on the tenant's SQL pods. // +// - Writes to external storage services such as S3. +// +// - Count of bytes returned from SQL to the client (network egress). type Config struct { // KVReadBatch is the baseline cost of a batch of KV reads. KVReadBatch RU diff --git a/pkg/obs/event_exporter.go b/pkg/obs/event_exporter.go index 99541e672851..53d001ecfbac 100644 --- a/pkg/obs/event_exporter.go +++ b/pkg/obs/event_exporter.go @@ -121,9 +121,10 @@ var _ obspb.ObsServer = &EventsServer{} // // |msg|msg|msg|msg|msg|msg|msg|msg|msg| // └----------------------^--------------┘ -// triggerSize maxBufferSize -// └--------------┘ -// sized-based flush is triggered when size falls in this range +// +// triggerSize maxBufferSize +// └--------------┘ +// sized-based flush is triggered when size falls in this range // // maxBufferSize should also be set such that it makes sense in relationship // with the flush latency: only one flush is ever in flight at a time, so the diff --git a/pkg/obsservice/obslib/migrations/migrations.go b/pkg/obsservice/obslib/migrations/migrations.go index 6e95f514b66d..9fe54bb21908 100644 --- a/pkg/obsservice/obslib/migrations/migrations.go +++ b/pkg/obsservice/obslib/migrations/migrations.go @@ -21,6 +21,7 @@ import ( // sqlMigrations embeds all the .sql file containing migrations to be run by // Goose. +// //go:embed sqlmigrations/*.sql var sqlMigrations embed.FS diff --git a/pkg/roachpb/data.go b/pkg/roachpb/data.go index caaa2f048f73..a287fe22cb84 100644 --- a/pkg/roachpb/data.go +++ b/pkg/roachpb/data.go @@ -211,10 +211,13 @@ func (k Key) String() string { // // Args: // valDirs: The direction for the key's components, generally needed for correct -// decoding. If nil, the values are pretty-printed with default encoding -// direction. +// +// decoding. If nil, the values are pretty-printed with default encoding +// direction. +// // maxLen: If not 0, only the first maxLen chars from the decoded key are -// returned, plus a "..." suffix. +// +// returned, plus a "..." suffix. 
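// A usage sketch for the two arguments described above (illustrative only; the
// exact output depends on how the key was encoded):
//
//	k.StringWithDirs(nil, 0)  // full pretty-printed key, default encoding directions
//	k.StringWithDirs(nil, 10) // first 10 chars of the decoded key, plus a "..." suffix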
func (k Key) StringWithDirs(valDirs []encoding.Direction, maxLen int) string { var s string if PrettyPrintKey != nil { @@ -1340,28 +1343,29 @@ func (t *Transaction) GetObservedTimestamp(nodeID NodeID) (hlc.ClockTimestamp, b // // Additionally, the caller must ensure: // -// 1) if the new range overlaps with some range in the list, then it -// also overlaps with every subsequent range in the list. +// 1. if the new range overlaps with some range in the list, then it +// also overlaps with every subsequent range in the list. // -// 2) the new range's "end" seqnum is larger or equal to the "end" -// seqnum of the last element in the list. +// 2. the new range's "end" seqnum is larger or equal to the "end" +// seqnum of the last element in the list. // // For example: -// current list [3 5] [10 20] [22 24] -// new item: [8 26] -// final list: [3 5] [8 26] // -// current list [3 5] [10 20] [22 24] -// new item: [28 32] -// final list: [3 5] [10 20] [22 24] [28 32] +// current list [3 5] [10 20] [22 24] +// new item: [8 26] +// final list: [3 5] [8 26] +// +// current list [3 5] [10 20] [22 24] +// new item: [28 32] +// final list: [3 5] [10 20] [22 24] [28 32] // // This corresponds to savepoints semantics: // -// - Property 1 says that a rollback to an earlier savepoint -// rolls back over all writes following that savepoint. -// - Property 2 comes from that the new range's 'end' seqnum is the -// current write seqnum and thus larger than or equal to every -// previously seen value. +// - Property 1 says that a rollback to an earlier savepoint +// rolls back over all writes following that savepoint. +// - Property 2 comes from that the new range's 'end' seqnum is the +// current write seqnum and thus larger than or equal to every +// previously seen value. func (t *Transaction) AddIgnoredSeqNumRange(newRange enginepb.IgnoredSeqNumRange) { // Truncate the list at the last element not included in the new range. @@ -2101,11 +2105,11 @@ func (s Span) Equal(o Span) bool { } // Overlaps returns true WLOG for span A and B iff: -// 1. Both spans contain one key (just the start key) and they are equal; or -// 2. The span with only one key is contained inside the other span; or -// 3. The end key of span A is strictly greater than the start key of span B -// and the end key of span B is strictly greater than the start key of span -// A. +// 1. Both spans contain one key (just the start key) and they are equal; or +// 2. The span with only one key is contained inside the other span; or +// 3. The end key of span A is strictly greater than the start key of span B +// and the end key of span B is strictly greater than the start key of span +// A. func (s Span) Overlaps(o Span) bool { if !s.Valid() || !o.Valid() { return false diff --git a/pkg/roachpb/data_test.go b/pkg/roachpb/data_test.go index 285489ba6d8e..0c553623bf88 100644 --- a/pkg/roachpb/data_test.go +++ b/pkg/roachpb/data_test.go @@ -774,14 +774,14 @@ func TestTransactionRefresh(t *testing.T) { // with the former and contains a subset of its protos. // // Assertions: -// 1. Transaction->TransactionRecord->Transaction is lossless for the fields -// in TransactionRecord. It drops all other fields. -// 2. TransactionRecord->Transaction->TransactionRecord is lossless. -// Fields not in TransactionRecord are set as zero values. -// 3. Transaction messages can be decoded as TransactionRecord messages. -// Fields not in TransactionRecord are dropped. -// 4. TransactionRecord messages can be decoded as Transaction messages. 
-// Fields not in TransactionRecord are decoded as zero values. +// 1. Transaction->TransactionRecord->Transaction is lossless for the fields +// in TransactionRecord. It drops all other fields. +// 2. TransactionRecord->Transaction->TransactionRecord is lossless. +// Fields not in TransactionRecord are set as zero values. +// 3. Transaction messages can be decoded as TransactionRecord messages. +// Fields not in TransactionRecord are dropped. +// 4. TransactionRecord messages can be decoded as Transaction messages. +// Fields not in TransactionRecord are decoded as zero values. func TestTransactionRecordRoundtrips(t *testing.T) { // Verify that converting from a Transaction to a TransactionRecord // strips out fields but is lossless for the desired fields. diff --git a/pkg/roachpb/errors.go b/pkg/roachpb/errors.go index 04e3a4329502..490028fa0f54 100644 --- a/pkg/roachpb/errors.go +++ b/pkg/roachpb/errors.go @@ -230,6 +230,7 @@ type ErrorDetailType int // This lists all ErrorDetail types. The numeric values in this list are used to // identify corresponding timeseries. The values correspond to the proto oneof // values. +// //go:generate stringer -type=ErrorDetailType const ( NotLeaseHolderErrType ErrorDetailType = 1 diff --git a/pkg/roachpb/gen/main.go b/pkg/roachpb/gen/main.go index 8380e410580b..17b4143d4246 100644 --- a/pkg/roachpb/gen/main.go +++ b/pkg/roachpb/gen/main.go @@ -98,12 +98,11 @@ func initVariants(ins *inspector.Inspector) { // The code in question looks like the below snippet, where we would pull // "ErrorDetail_NotLeaseholder" one of the returned strings. // -// // XXX_OneofWrappers is for the internal use of the proto package. -// func (*ErrorDetail) XXX_OneofWrappers() []interface{} { -// return []interface{}{ -// (*ErrorDetail_NotLeaseHolder)(nil), -// ... -// +// // XXX_OneofWrappers is for the internal use of the proto package. +// func (*ErrorDetail) XXX_OneofWrappers() []interface{} { +// return []interface{}{ +// (*ErrorDetail_NotLeaseHolder)(nil), +// ... func findVariantTypes(ins *inspector.Inspector, oneofName string) []string { var variants []string var inFunc bool @@ -149,10 +148,9 @@ func findVariantTypes(ins *inspector.Inspector, oneofName string) []string { // The code in question looks like the below snippet, where we would return // ("NotLeaseHolder", "NotLeaseHolderError"). // -// type ErrorDetail_NotLeaseHolder struct { -// NotLeaseHolder *NotLeaseHolderError -// } -// +// type ErrorDetail_NotLeaseHolder struct { +// NotLeaseHolder *NotLeaseHolderError +// } func findVariantField(ins *inspector.Inspector, vType string) (fieldName, msgName string) { ins.Preorder([]ast.Node{ (*ast.TypeSpec)(nil), diff --git a/pkg/roachpb/metadata_replicas.go b/pkg/roachpb/metadata_replicas.go index f058dcb849f5..72a07d089c4a 100644 --- a/pkg/roachpb/metadata_replicas.go +++ b/pkg/roachpb/metadata_replicas.go @@ -144,16 +144,16 @@ func (d ReplicaSet) containsVoterIncoming() bool { // For simplicity, CockroachDB treats learner replicas the same as voter // replicas as much as possible, but there are a few exceptions: // -// - Learner replicas are not considered when calculating quorum size, and thus -// do not affect the computation of which ranges are under-replicated for -// upreplication/alerting/debug/etc purposes. Ditto for over-replicated. -// - Learner replicas cannot become raft leaders, so we also don't allow them to -// become leaseholders. As a result, DistSender and the various oracles don't -// try to send them traffic. 
-// - The raft snapshot queue tries to avoid sending snapshots to ephemeral -// learners (but not to non-voting replicas, which are also etcd learners) for -// reasons described below. -// - Merges won't run while a learner replica is present. +// - Learner replicas are not considered when calculating quorum size, and thus +// do not affect the computation of which ranges are under-replicated for +// upreplication/alerting/debug/etc purposes. Ditto for over-replicated. +// - Learner replicas cannot become raft leaders, so we also don't allow them to +// become leaseholders. As a result, DistSender and the various oracles don't +// try to send them traffic. +// - The raft snapshot queue tries to avoid sending snapshots to ephemeral +// learners (but not to non-voting replicas, which are also etcd learners) for +// reasons described below. +// - Merges won't run while a learner replica is present. // // Replicas are now added in two ConfChange transactions. The first creates the // learner and the second promotes it to a voter. If the node that is diff --git a/pkg/roachpb/span_config.go b/pkg/roachpb/span_config.go index 40c0be7ee2ef..02c621ea8ba1 100644 --- a/pkg/roachpb/span_config.go +++ b/pkg/roachpb/span_config.go @@ -98,7 +98,8 @@ func (s *SpanConfig) ValidateSystemTargetSpanConfig() error { // GetNumVoters returns the number of voting replicas as defined in the // span config. // TODO(arul): We can get rid of this now that we're correctly populating -// numVoters when going from ZoneConfigs -> SpanConfigs. +// +// numVoters when going from ZoneConfigs -> SpanConfigs. func (s *SpanConfig) GetNumVoters() int32 { if s.NumVoters != 0 { return s.NumVoters diff --git a/pkg/roachprod/install/cluster_synced.go b/pkg/roachprod/install/cluster_synced.go index 7e23d0f1bb08..df0bac7e9590 100644 --- a/pkg/roachprod/install/cluster_synced.go +++ b/pkg/roachprod/install/cluster_synced.go @@ -126,10 +126,10 @@ func (c *SyncedCluster) localVMDir(n Node) string { // TargetNodes is the fully expanded, ordered list of nodes that any given // roachprod command is intending to target. // -// $ roachprod create local -n 4 -// $ roachprod start local # [1, 2, 3, 4] -// $ roachprod start local:2-4 # [2, 3, 4] -// $ roachprod start local:2,1,4 # [1, 2, 4] +// $ roachprod create local -n 4 +// $ roachprod start local # [1, 2, 3, 4] +// $ roachprod start local:2-4 # [2, 3, 4] +// $ roachprod start local:2,1,4 # [1, 2, 4] func (c *SyncedCluster) TargetNodes() Nodes { return append(Nodes{}, c.Nodes...) } @@ -170,25 +170,25 @@ func (c *SyncedCluster) GetInternalIP(ctx context.Context, n Node) (string, erro // correct process, when monitoring or stopping. 
// // Normally, the value is of the form: -// [/][/tag] // -// Examples: +// [/][/tag] // -// - non-local cluster without tags: -// ROACHPROD=1 +// Examples: // -// - non-local cluster with tag foo: -// ROACHPROD=1/foo +// - non-local cluster without tags: +// ROACHPROD=1 // -// - non-local cluster with hierarchical tag foo/bar: -// ROACHPROD=1/foo/bar +// - non-local cluster with tag foo: +// ROACHPROD=1/foo // -// - local cluster: -// ROACHPROD=local-foo/1 +// - non-local cluster with hierarchical tag foo/bar: +// ROACHPROD=1/foo/bar // -// - local cluster with tag bar: -// ROACHPROD=local-foo/1/bar +// - local cluster: +// ROACHPROD=local-foo/1 // +// - local cluster with tag bar: +// ROACHPROD=local-foo/1/bar func (c *SyncedCluster) roachprodEnvValue(node Node) string { var parts []string if c.IsLocal() { @@ -850,12 +850,12 @@ func (c *SyncedCluster) Wait(ctx context.Context, l *logger.Logger) error { // added to the hosts via the c.AuthorizedKeys field. It does so in the following // steps: // -// 1. Creates an ssh key pair on the first host to be used on all hosts if -// none exists. -// 2. Distributes the public key, private key, and authorized_keys file from -// the first host to the others. -// 3. Merges the data in c.AuthorizedKeys with the existing authorized_keys -// files on all hosts. +// 1. Creates an ssh key pair on the first host to be used on all hosts if +// none exists. +// 2. Distributes the public key, private key, and authorized_keys file from +// the first host to the others. +// 3. Merges the data in c.AuthorizedKeys with the existing authorized_keys +// files on all hosts. // // This call strives to be idempotent. func (c *SyncedCluster) SetupSSH(ctx context.Context, l *logger.Logger) error { @@ -1686,9 +1686,9 @@ func (c *SyncedCluster) Put( // For example, if dest is "tpcc-test.logs" then the logs for each node will be // stored like: // -// tpcc-test.logs/1.logs/... -// tpcc-test.logs/2.logs/... -// ... +// tpcc-test.logs/1.logs/... +// tpcc-test.logs/2.logs/... +// ... // // Log file syncing uses rsync which attempts to be efficient when deciding // which files to update. The logs are merged by calling @@ -2221,7 +2221,7 @@ func (c *SyncedCluster) ParallelE( var writer ui.Writer out := l.Stdout if display == "" { - out = ioutil.Discard + out = io.Discard } var ticker *time.Ticker diff --git a/pkg/roachprod/install/nodes.go b/pkg/roachprod/install/nodes.go index ca75cd8e13eb..f1372dd5af69 100644 --- a/pkg/roachprod/install/nodes.go +++ b/pkg/roachprod/install/nodes.go @@ -31,12 +31,11 @@ type Nodes []Node // node ranges. Nodes are 1-indexed. // // Examples: -// - "all" -// - "1" -// - "1-3" -// - "1,3,5" -// - "1,2-4,7-8" -// +// - "all" +// - "1" +// - "1-3" +// - "1,3,5" +// - "1,2-4,7-8" func ListNodes(s string, numNodesInCluster int) (Nodes, error) { if s == "" { return nil, errors.AssertionFailedf("empty node selector") diff --git a/pkg/roachprod/logger/log.go b/pkg/roachprod/logger/log.go index f35e66fbb2f1..509cb82a882a 100644 --- a/pkg/roachprod/logger/log.go +++ b/pkg/roachprod/logger/log.go @@ -14,7 +14,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -51,14 +50,14 @@ type quietStdoutOption struct { } func (quietStdoutOption) apply(cfg *Config) { - cfg.Stdout = ioutil.Discard + cfg.Stdout = io.Discard } type quietStderrOption struct { } func (quietStderrOption) apply(cfg *Config) { - cfg.Stderr = ioutil.Discard + cfg.Stderr = io.Discard } // QuietStdout is a logger option that suppresses Stdout. 
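For readers skimming the ListNodes hunk above, a short usage sketch of the node-selector
syntax it documents (the 8-node cluster size and the error handling are illustrative,
not taken from this diff):

	// Expand a node selector against an 8-node cluster.
	nodes, err := install.ListNodes("1,2-4,7-8", 8)
	if err != nil {
		return err
	}
	// nodes is now install.Nodes{1, 2, 3, 4, 7, 8}, per the expansion rules
	// documented on ListNodes.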
diff --git a/pkg/roachprod/prometheus/prometheus.go b/pkg/roachprod/prometheus/prometheus.go index 80483221c4ab..adc994e51f72 100644 --- a/pkg/roachprod/prometheus/prometheus.go +++ b/pkg/roachprod/prometheus/prometheus.go @@ -81,7 +81,7 @@ type GrafanaConfig struct { // WithWorkload sets up a scraping config for a single `workload` running on the // given node and port. If the workload is in the config, the node and port will be // added to the workload's scrape config (i.e. allows for chaining). If port == 0, -//defaultWorkloadPort is used. +// defaultWorkloadPort is used. func (cfg *Config) WithWorkload(workloadName string, nodes install.Node, port int) *Config { // Find the workload's scrapeConfig, if it exists. diff --git a/pkg/roachprod/vm/aws/config.go b/pkg/roachprod/vm/aws/config.go index 4db6b71066e9..20445f11d15b 100644 --- a/pkg/roachprod/vm/aws/config.go +++ b/pkg/roachprod/vm/aws/config.go @@ -33,23 +33,23 @@ import ( // The struct is constructed by deserializing json that follows the form of // the below example. // -// { -// "regions": { -// "sensitive": false, -// "type": "list", -// "value": [ -// { -// "ami_id": "ami-48630c2e", -// "region": "ap-northeast-1", -// "security_group": "sg-0006e480d77a10104", -// "subnets": { -// "ap-northeast-1a": "subnet-0d144db3c9e47edf5", -// "ap-northeast-1c": "subnet-02fcaaa6212fc3c1a", -// "ap-northeast-1d": "subnet-0e9006ef8b3bef61f" -// } -// } -// ] -// } +// { +// "regions": { +// "sensitive": false, +// "type": "list", +// "value": [ +// { +// "ami_id": "ami-48630c2e", +// "region": "ap-northeast-1", +// "security_group": "sg-0006e480d77a10104", +// "subnets": { +// "ap-northeast-1a": "subnet-0d144db3c9e47edf5", +// "ap-northeast-1c": "subnet-02fcaaa6212fc3c1a", +// "ap-northeast-1d": "subnet-0e9006ef8b3bef61f" +// } +// } +// ] +// } // // It has this awkward structure to deal with the terraform serialization // of lists. Ideally terraform would output an artifact whose structure mirrors diff --git a/pkg/roachprod/vm/azure/doc.go b/pkg/roachprod/vm/azure/doc.go index 7ec56825f3f1..a0043ec348cf 100644 --- a/pkg/roachprod/vm/azure/doc.go +++ b/pkg/roachprod/vm/azure/doc.go @@ -29,17 +29,17 @@ // // The following resources are created for each cluster: // -// Roachprod "commons" -// | Resource Group (one per Location / Region) -// | VNet (10./16) -// | Subnet (10./18 range) -// -// Per cluster -// | Resource Group (one per Location / Region) -// | []IPAddress (public IP address for each VM) -// | []NIC (bound to IPAddress and to a common Subnet) -// | []VM (bound to a NIC) -// | OSDisk (100GB, standard SSD storage) +// Roachprod "commons" +// | Resource Group (one per Location / Region) +// | VNet (10./16) +// | Subnet (10./18 range) +// +// Per cluster +// | Resource Group (one per Location / Region) +// | []IPAddress (public IP address for each VM) +// | []NIC (bound to IPAddress and to a common Subnet) +// | []VM (bound to a NIC) +// | OSDisk (100GB, standard SSD storage) // // Roachprod creates a "common" resource group, VNet, and Subnet for // each location that clusters may be deployed into. Each NIC that is diff --git a/pkg/roachprod/vm/azure/utils.go b/pkg/roachprod/vm/azure/utils.go index 6953d6de908e..cf83dbff95ce 100644 --- a/pkg/roachprod/vm/azure/utils.go +++ b/pkg/roachprod/vm/azure/utils.go @@ -108,10 +108,10 @@ touch /mnt/data1/.roachprod-initialized // CTRL-c while roachprod waiting for initialization to complete (otherwise, roachprod // tries to destroy partially created cluster). 
// Then, ssh to one of the machines: -// 1. /var/log/cloud-init-output.log contains the output of all the steps -// performed by cloud-init, including the steps performed by above script. -// 2. You can extract uploaded script and try executing/debugging it via: -// sudo cloud-init query userdata > script.sh +// 1. /var/log/cloud-init-output.log contains the output of all the steps +// performed by cloud-init, including the steps performed by above script. +// 2. You can extract uploaded script and try executing/debugging it via: +// sudo cloud-init query userdata > script.sh func evalStartupTemplate(args azureStartupArgs) (string, error) { cloudInit := bytes.NewBuffer(nil) encoder := base64.NewEncoder(base64.StdEncoding, cloudInit) diff --git a/pkg/roachprod/vm/local/local.go b/pkg/roachprod/vm/local/local.go index c7fc1124c5a3..4f883112fd1e 100644 --- a/pkg/roachprod/vm/local/local.go +++ b/pkg/roachprod/vm/local/local.go @@ -34,10 +34,12 @@ const ProviderName = config.Local // Node indexes start at 1. // // If the cluster name is "local", node 1 directory is: -// ${HOME}/local/1 +// +// ${HOME}/local/1 // // If the cluster name is "local-foo", node 1 directory is: -// ${HOME}/local/foo-1 +// +// ${HOME}/local/foo-1 // // WARNING: when we destroy a local cluster, we remove these directories so it's // important that this function never returns things like "" or "/". diff --git a/pkg/roachprod/vm/vm.go b/pkg/roachprod/vm/vm.go index 97306d4a6461..8aa4f00713d2 100644 --- a/pkg/roachprod/vm/vm.go +++ b/pkg/roachprod/vm/vm.go @@ -407,8 +407,7 @@ func ProvidersSequential(named []string, action func(Provider) error) error { // // For example: // -// ZonePlacement(3, 8) = []int{0, 0, 1, 1, 2, 2, 0, 1} -// +// ZonePlacement(3, 8) = []int{0, 0, 1, 1, 2, 2, 0, 1} func ZonePlacement(numZones, numNodes int) (nodeZones []int) { if numZones < 1 { panic("expected 1 or more zones") diff --git a/pkg/security/certificate_manager.go b/pkg/security/certificate_manager.go index b2f5397650f8..bb94735533a4 100644 --- a/pkg/security/certificate_manager.go +++ b/pkg/security/certificate_manager.go @@ -88,17 +88,17 @@ var ( // no fallback if invalid certs/keys are present. // // The nomenclature for certificates is as follows, all within the certs-dir. -// - ca.crt main CA certificate. -// Used to verify everything unless overridden by more specific CAs. -// - ca-client.crt CA certificate to verify client certificates. If it does not exist, -// fall back on 'ca.crt'. -// - node.crt node certificate. -// Server-side certificate (always) and client-side certificate unless -// client.node.crt is found. -// Verified using 'ca.crt'. -// - client..crt client certificate for 'user'. Verified using 'ca.crt', or 'ca-client.crt'. -// - client.node.crt client certificate for the 'node' user. If it does not exist, -// fall back on 'node.crt'. +// - ca.crt main CA certificate. +// Used to verify everything unless overridden by more specific CAs. +// - ca-client.crt CA certificate to verify client certificates. If it does not exist, +// fall back on 'ca.crt'. +// - node.crt node certificate. +// Server-side certificate (always) and client-side certificate unless +// client.node.crt is found. +// Verified using 'ca.crt'. +// - client..crt client certificate for 'user'. Verified using 'ca.crt', or 'ca-client.crt'. +// - client.node.crt client certificate for the 'node' user. If it does not exist, +// fall back on 'node.crt'. 
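// A sketch of the client-CA fallback described above, using a hypothetical
// exists() helper and certsDir variable (neither is part of this package):
//
//	clientCAFile := "ca-client.crt"
//	if !exists(certsDir, clientCAFile) {
//		// No dedicated client CA present; client certificates are then
//		// verified against the main CA, per the rules above.
//		clientCAFile = "ca.crt"
//	}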
type CertificateManager struct { tenantIdentifier uint64 certnames.Locator diff --git a/pkg/security/password/password.go b/pkg/security/password/password.go index 76f1c20a2a45..aecdfca12f50 100644 --- a/pkg/security/password/password.go +++ b/pkg/security/password/password.go @@ -559,14 +559,14 @@ func isMD5Hash(hashedPassword []byte) bool { // password is already hashed, and if already hashed, verifies whether // the hash is recognized as a valid hash. // Return values: -// - isPreHashed indicates whether the password is already hashed. -// - supportedScheme indicates whether the scheme is currently supported -// for authentication. If false, issueNum indicates which github -// issue to report in the error message. -// - schemeName is the name of the hashing scheme, for inclusion -// in error messages (no guarantee is made of stability of this string). -// - hashedPassword is a translated version from the input, -// suitable for storage in the password database. +// - isPreHashed indicates whether the password is already hashed. +// - supportedScheme indicates whether the scheme is currently supported +// for authentication. If false, issueNum indicates which github +// issue to report in the error message. +// - schemeName is the name of the hashing scheme, for inclusion +// in error messages (no guarantee is made of stability of this string). +// - hashedPassword is a translated version from the input, +// suitable for storage in the password database. func CheckPasswordHashValidity( ctx context.Context, inputPassword []byte, ) ( @@ -604,16 +604,16 @@ func CheckPasswordHashValidity( // the mapping gives SCRAM authn latency of ~60ms too. // // The actual values were computed as follows: -// 1. measure the bcrypt authentication cost for costs 1-19. -// 2. assuming the bcrypt latency is a_bcrypt*2^c + b_bcrypt, where c -// is the bcrypt cost, use statistical regression to derive -// a_bcrypt and b_bcrypt. (we found b_bcrypt to be negligible.) -// 3. measure the SCRAM authn cost for iter counts 4096-1000000, -// *on the same hardware*. -// 4. assuming the SCRAM latency is a_scram*c + b_scram, -// where c is the SCRAM iter count, use stat regression -// to derive a_scram and b_scram. (we found b_scram to be negligible). -// 5. for each bcrypt cost, compute scram iter count = a_bcrypt * 2^cost_bcrypt / a_scram. +// 1. measure the bcrypt authentication cost for costs 1-19. +// 2. assuming the bcrypt latency is a_bcrypt*2^c + b_bcrypt, where c +// is the bcrypt cost, use statistical regression to derive +// a_bcrypt and b_bcrypt. (we found b_bcrypt to be negligible.) +// 3. measure the SCRAM authn cost for iter counts 4096-1000000, +// *on the same hardware*. +// 4. assuming the SCRAM latency is a_scram*c + b_scram, +// where c is the SCRAM iter count, use stat regression +// to derive a_scram and b_scram. (we found b_scram to be negligible). +// 5. for each bcrypt cost, compute scram iter count = a_bcrypt * 2^cost_bcrypt / a_scram. // // The speed of the CPU used for the measurements is equally // represented in a_bcrypt and a_scram, so the formula eliminates any diff --git a/pkg/security/username/username.go b/pkg/security/username/username.go index 01d4d80e55a6..7f5e258bf39c 100644 --- a/pkg/security/username/username.go +++ b/pkg/security/username/username.go @@ -49,16 +49,15 @@ import ( // considered pre-normalized and can be used directly for comparisons, // lookup etc. // -// - The constructor MakeSQLUsernameFromUserInput() creates -// a username from "external input". 
+// - The constructor MakeSQLUsernameFromUserInput() creates +// a username from "external input". // -// - The constructor MakeSQLUsernameFromPreNormalizedString() -// creates a username when the caller can guarantee that -// the input is already pre-normalized. +// - The constructor MakeSQLUsernameFromPreNormalizedString() +// creates a username when the caller can guarantee that +// the input is already pre-normalized. // // For convenience, the SQLIdentifier() method also represents a // username in the form suitable for input back by the SQL parser. -// type SQLUsername struct { u string } diff --git a/pkg/server/admin.go b/pkg/server/admin.go index fc3f3a7bb4c2..e1fc676ed871 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -3117,13 +3117,13 @@ func (q *sqlQuery) QueryArguments() []interface{} { // // For example, suppose we have the following calls: // -// query.Append("SELECT * FROM foo WHERE a > $ AND a < $ ", arg1, arg2) -// query.Append("LIMIT $", limit) +// query.Append("SELECT * FROM foo WHERE a > $ AND a < $ ", arg1, arg2) +// query.Append("LIMIT $", limit) // // The query is rewritten into: // -// SELECT * FROM foo WHERE a > $1 AND a < $2 LIMIT $3 -// /* $1 = arg1, $2 = arg2, $3 = limit */ +// SELECT * FROM foo WHERE a > $1 AND a < $2 LIMIT $3 +// /* $1 = arg1, $2 = arg2, $3 = limit */ // // Note that this method does NOT return any errors. Instead, we queue up // errors, which can later be accessed. Returning an error here would make diff --git a/pkg/server/api_v2.go b/pkg/server/api_v2.go index 6e00d3d5360a..c035c95f46ad 100644 --- a/pkg/server/api_v2.go +++ b/pkg/server/api_v2.go @@ -8,29 +8,30 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -//go:generate swagger generate spec -w . -o ../../docs/generated/swagger/spec.json --scan-models +// TODO(ricky): re-enable. +//-go:generate swagger generate spec -w . -o ../../docs/generated/swagger/spec.json --scan-models // CockroachDB v2 API // // API for querying information about CockroachDB health, nodes, ranges, // sessions, and other meta entities. // -// Schemes: http, https -// Host: localhost -// BasePath: /api/v2/ -// Version: 2.0.0 -// License: Business Source License +// Schemes: http, https +// Host: localhost +// BasePath: /api/v2/ +// Version: 2.0.0 +// License: Business Source License // -// Produces: -// - application/json +// Produces: +// - application/json // -// SecurityDefinitions: -// api_session: -// type: apiKey -// name: X-Cockroach-API-Session -// description: Handle to logged-in REST session. Use `/login/` to -// log in and get a session. -// in: header +// SecurityDefinitions: +// api_session: +// type: apiKey +// name: X-Cockroach-API-Session +// description: Handle to logged-in REST session. Use `/login/` to +// log in and get a session. +// in: header // // swagger:meta package server @@ -214,7 +215,7 @@ type listSessionsResponse struct { // swagger:operation GET /sessions/ listSessions // -// List sessions +// # List sessions // // List all sessions on this cluster. If a username is provided, only // sessions from that user are returned. @@ -223,37 +224,39 @@ type listSessionsResponse struct { // // --- // parameters: -// - name: username -// type: string -// in: query -// description: Username of user to return sessions for; if unspecified, +// - name: username +// type: string +// in: query +// description: Username of user to return sessions for; if unspecified, // sessions from all users are returned. 
-// required: false -// - name: exclude_closed_sessions -// type: bool -// in: query -// description: Boolean to exclude closed sessions; if unspecified, defaults +// required: false +// - name: exclude_closed_sessions +// type: bool +// in: query +// description: Boolean to exclude closed sessions; if unspecified, defaults // to false and closed sessions are included in the response. -// required: false -// - name: limit -// type: integer -// in: query -// description: Maximum number of results to return in this call. -// required: false -// - name: start -// type: string -// in: query -// description: Continuation token for results after a past limited run. -// required: false +// required: false +// - name: limit +// type: integer +// in: query +// description: Maximum number of results to return in this call. +// required: false +// - name: start +// type: string +// in: query +// description: Continuation token for results after a past limited run. +// required: false +// // produces: // - application/json // security: // - api_session: [] // responses: -// "200": -// description: List sessions response. -// schema: -// "$ref": "#/definitions/listSessionsResp" +// +// "200": +// description: List sessions response. +// schema: +// "$ref": "#/definitions/listSessionsResp" func (a *apiV2Server) listSessions(w http.ResponseWriter, r *http.Request) { ctx := r.Context() limit, start := getRPCPaginationValues(r) @@ -281,7 +284,7 @@ func (a *apiV2Server) listSessions(w http.ResponseWriter, r *http.Request) { // swagger:operation GET /health/ health // -// Check node health +// # Check node health // // Helper endpoint to check for node health. If `ready` is true, it also checks // if this node is fully operational and ready to accept SQL connections. @@ -290,20 +293,22 @@ func (a *apiV2Server) listSessions(w http.ResponseWriter, r *http.Request) { // // --- // parameters: -// - name: ready -// type: boolean -// in: query -// description: If true, check whether this node is ready to accept SQL +// - name: ready +// type: boolean +// in: query +// description: If true, check whether this node is ready to accept SQL // connections. If false, this endpoint always returns success, unless // the API server itself is down. -// required: false +// required: false +// // produces: // - application/json // responses: -// "200": -// description: Indicates healthy node. -// "500": -// description: Indicates unhealthy node. +// +// "200": +// description: Indicates healthy node. +// "500": +// description: Indicates unhealthy node. func (a *apiV2Server) health(w http.ResponseWriter, r *http.Request) { ready := false readyStr := r.URL.Query().Get("ready") @@ -333,7 +338,7 @@ func (a *apiV2Server) health(w http.ResponseWriter, r *http.Request) { // swagger:operation GET /rules/ rules // -// Get metric recording and alerting rule templates +// # Get metric recording and alerting rule templates // // Endpoint to export recommended metric recording and alerting rules. 
// These rules are intended to be used as a guideline for aggregating @@ -346,10 +351,11 @@ func (a *apiV2Server) health(w http.ResponseWriter, r *http.Request) { // produces: // - text/plain // responses: -// "200": -// description: Recording and Alert Rules -// schema: -// "$ref": "#/definitions/PrometheusRuleGroup" +// +// "200": +// description: Recording and Alert Rules +// schema: +// "$ref": "#/definitions/PrometheusRuleGroup" func (a *apiV2Server) listRules(w http.ResponseWriter, r *http.Request) { a.promRuleExporter.ScrapeRegistry(r.Context()) response, err := a.promRuleExporter.PrintAsYAML() diff --git a/pkg/server/api_v2_auth.go b/pkg/server/api_v2_auth.go index 4995a792c65f..2d5e52b715ed 100644 --- a/pkg/server/api_v2_auth.go +++ b/pkg/server/api_v2_auth.go @@ -106,43 +106,45 @@ type loginResponse struct { // swagger:operation POST /login/ login // -// API Login +// # API Login // // Creates an API session for use with API endpoints that require // authentication. // // --- // parameters: -// - name: credentials -// schema: +// - name: credentials +// schema: // type: object // properties: -// username: -// type: string -// password: -// type: string +// username: +// type: string +// password: +// type: string // required: -// - username -// - password -// in: body -// description: Credentials for login -// required: true +// - username +// - password +// in: body +// description: Credentials for login +// required: true +// // produces: // - application/json // - text/plain // consumes: // - application/x-www-form-urlencoded // responses: -// "200": -// description: Login response. -// schema: -// "$ref": "#/definitions/loginResponse" -// "400": -// description: Bad request, if required parameters absent. -// type: string -// "401": -// description: Unauthorized, if credentials don't match. -// type: string +// +// "200": +// description: Login response. +// schema: +// "$ref": "#/definitions/loginResponse" +// "400": +// description: Bad request, if required parameters absent. +// type: string +// "401": +// description: Unauthorized, if credentials don't match. +// type: string func (a *authenticationV2Server) login(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { http.Error(w, "not found", http.StatusNotFound) @@ -195,7 +197,7 @@ type logoutResponse struct { // swagger:operation POST /logout/ logout // -// API Logout +// # API Logout // // Logs out on a previously-created API session. // @@ -206,14 +208,15 @@ type logoutResponse struct { // security: // - api_session: [] // responses: -// "200": -// description: Logout response. -// schema: -// "$ref": "#/definitions/logoutResponse" -// "400": -// description: Bad request, if API session not present in headers, or -// invalid session. -// type: string +// +// "200": +// description: Logout response. +// schema: +// "$ref": "#/definitions/logoutResponse" +// "400": +// description: Bad request, if API session not present in headers, or +// invalid session. +// type: string func (a *authenticationV2Server) logout(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { http.Error(w, "not found", http.StatusNotFound) diff --git a/pkg/server/api_v2_ranges.go b/pkg/server/api_v2_ranges.go index ec20d66660c7..30cfacdbcb1a 100644 --- a/pkg/server/api_v2_ranges.go +++ b/pkg/server/api_v2_ranges.go @@ -74,7 +74,7 @@ type nodesResponse struct { // swagger:operation GET /nodes/ listNodes // -// List nodes +// # List nodes // // List all nodes on this cluster. 
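// A minimal Go sketch of calling this endpoint after logging in via the
// /login/ endpoint above; the port, the placeholder credentials, and the way
// the session token is extracted are assumptions, not taken from this file:
//
//	form := url.Values{"username": {"roach"}, "password": {"hunter2"}}
//	resp, _ := http.PostForm("https://localhost:8080/api/v2/login/", form)
//	// ... decode the session token from resp.Body, then:
//	req, _ := http.NewRequest("GET", "https://localhost:8080/api/v2/nodes/?limit=10", nil)
//	req.Header.Set("X-Cockroach-API-Session", sessionToken)
//	resp, _ = http.DefaultClient.Do(req)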
// @@ -82,25 +82,27 @@ type nodesResponse struct { // // --- // parameters: -// - name: limit -// type: integer -// in: query -// description: Maximum number of results to return in this call. -// required: false -// - name: offset -// type: integer -// in: query -// description: Continuation offset for results after a past limited run. -// required: false +// - name: limit +// type: integer +// in: query +// description: Maximum number of results to return in this call. +// required: false +// - name: offset +// type: integer +// in: query +// description: Continuation offset for results after a past limited run. +// required: false +// // produces: // - application/json // security: // - api_session: [] // responses: -// "200": -// description: List nodes response. -// schema: -// "$ref": "#/definitions/nodesResponse" +// +// "200": +// description: List nodes response. +// schema: +// "$ref": "#/definitions/nodesResponse" func (a *apiV2Server) listNodes(w http.ResponseWriter, r *http.Request) { ctx := r.Context() limit, offset := getSimplePaginationValues(r) @@ -164,7 +166,7 @@ type rangeResponse struct { // swagger:operation GET /ranges/{range_id}/ listRange // -// Get info about a range +// # Get info about a range // // Retrieves more information about a specific range. // @@ -172,19 +174,21 @@ type rangeResponse struct { // // --- // parameters: -// - name: range_id -// in: path -// type: integer -// required: true +// - name: range_id +// in: path +// type: integer +// required: true +// // produces: // - application/json // security: // - api_session: [] // responses: -// "200": -// description: List range response -// schema: -// "$ref": "#/definitions/rangeResponse" +// +// "200": +// description: List range response +// schema: +// "$ref": "#/definitions/rangeResponse" func (a *apiV2Server) listRange(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ctx = apiToOutgoingGatewayCtx(ctx, r) @@ -325,7 +329,7 @@ type nodeRangesResponse struct { // swagger:operation GET /nodes/{node_id}/ranges/ listNodeRanges // -// List ranges on a node +// # List ranges on a node // // Lists information about ranges on a specified node. If a list of range IDs // is specified, only information about those ranges is returned. @@ -334,38 +338,40 @@ type nodeRangesResponse struct { // // --- // parameters: -// - name: node_id -// in: path -// type: integer -// description: ID of node to query, or `local` for local node. -// required: true -// - name: ranges -// in: query -// type: array -// required: false -// description: IDs of ranges to return information for. All ranges returned +// - name: node_id +// in: path +// type: integer +// description: ID of node to query, or `local` for local node. +// required: true +// - name: ranges +// in: query +// type: array +// required: false +// description: IDs of ranges to return information for. All ranges returned // if unspecified. -// items: +// items: // type: integer -// - name: limit -// type: integer -// in: query -// description: Maximum number of results to return in this call. -// required: false -// - name: offset -// type: integer -// in: query -// description: Continuation offset for results after a past limited run. -// required: false +// - name: limit +// type: integer +// in: query +// description: Maximum number of results to return in this call. +// required: false +// - name: offset +// type: integer +// in: query +// description: Continuation offset for results after a past limited run. 
+// required: false +// // produces: // - application/json // security: // - api_session: [] // responses: -// "200": -// description: Node ranges response. -// schema: -// "$ref": "#/definitions/nodeRangesResponse" +// +// "200": +// description: Node ranges response. +// schema: +// "$ref": "#/definitions/nodeRangesResponse" func (a *apiV2Server) listNodeRanges(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ctx = apiToOutgoingGatewayCtx(ctx, r) @@ -440,7 +446,7 @@ type hotRangeInfo struct { // swagger:operation GET /ranges/hot/ listHotRanges // -// List hot ranges +// # List hot ranges // // Lists information about hot ranges. If a list of range IDs // is specified, only information about those ranges is returned. @@ -449,31 +455,33 @@ type hotRangeInfo struct { // // --- // parameters: -// - name: node_id -// in: query -// type: integer -// description: ID of node to query, or `local` for local node. If +// - name: node_id +// in: query +// type: integer +// description: ID of node to query, or `local` for local node. If // unspecified, all nodes are queried. -// required: false -// - name: limit -// type: integer -// in: query -// description: Maximum number of results to return in this call. -// required: false -// - name: start -// type: string -// in: query -// description: Continuation token for results after a past limited run. -// required: false +// required: false +// - name: limit +// type: integer +// in: query +// description: Maximum number of results to return in this call. +// required: false +// - name: start +// type: string +// in: query +// description: Continuation token for results after a past limited run. +// required: false +// // produces: // - application/json // security: // - api_session: [] // responses: -// "200": -// description: Hot ranges response. -// schema: -// "$ref": "#/definitions/hotRangesResponse" +// +// "200": +// description: Hot ranges response. +// schema: +// "$ref": "#/definitions/hotRangesResponse" func (a *apiV2Server) listHotRanges(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ctx = apiToOutgoingGatewayCtx(ctx, r) diff --git a/pkg/server/api_v2_sql.go b/pkg/server/api_v2_sql.go index efd9047dd65b..7e57d076a9c6 100644 --- a/pkg/server/api_v2_sql.go +++ b/pkg/server/api_v2_sql.go @@ -39,7 +39,7 @@ var sqlAPIClock timeutil.TimeSource = timeutil.DefaultTimeSource{} // swagger:operation POST /sql/ execSQL // -// Execute one or more SQL statements +// # Execute one or more SQL statements // // Executes one or more SQL statements. // @@ -66,132 +66,134 @@ var sqlAPIClock timeutil.TimeSource = timeutil.DefaultTimeSource{} // consumes: // - application/json // parameters: -// - in: body -// name: request -// schema: +// - in: body +// name: request +// schema: // type: object // required: -// - statements +// - statements // properties: -// database: -// type: string -// description: The current database for the execution. Defaults to defaultdb. -// application_name: -// type: string -// description: The SQL application_name parameter. -// timeout: -// type: string -// description: Max time budget for the execution, using Go duration syntax. Default to 5 seconds. -// max_result_size: -// type: integer -// description: -// Max size in bytes for the execution field in the response. -// Execution stops with an error if the results do not fit. -// statements: -// description: The SQL statement(s) to run. 
-// type: array -// items: -// type: object -// required: -// - sql -// properties: -// sql: -// type: string -// description: SQL syntax for one statement. -// arguments: -// type: array -// description: Placeholder parameter values. +// database: +// type: string +// description: The current database for the execution. Defaults to defaultdb. +// application_name: +// type: string +// description: The SQL application_name parameter. +// timeout: +// type: string +// description: Max time budget for the execution, using Go duration syntax. Default to 5 seconds. +// max_result_size: +// type: integer +// description: +// Max size in bytes for the execution field in the response. +// Execution stops with an error if the results do not fit. +// statements: +// description: The SQL statement(s) to run. +// type: array +// items: +// type: object +// required: +// - sql +// properties: +// sql: +// type: string +// description: SQL syntax for one statement. +// arguments: +// type: array +// description: Placeholder parameter values. +// // produces: // - application/json // responses: -// '405': -// description: Bad method. Only the POST method is supported. -// '400': -// description: Bad request. Bad input encoding, missing SQL or invalid parameter. -// '500': -// description: Internal error encountered. -// '200': -// description: Query results and optional execution error. -// schema: -// type: object -// required: -// - num_statements -// - execution -// properties: -// num_statements: -// type: integer -// description: The number of statements in the input SQL. -// txn_error: -// type: object -// description: The details of the error, if an error was encountered. -// required: -// - message -// - code -// properties: -// code: -// type: string -// description: The SQLSTATE 5-character code of the error. -// message: -// type: string -// additionalProperties: {} -// execution: -// type: object -// required: -// - retries -// - txn_results -// properties: -// retries: -// type: integer -// description: The number of times the transaction was retried. -// txn_results: -// type: array -// description: The result sets, one per SQL statement. -// items: -// type: object -// required: -// - statement -// - tag -// - start -// - end -// properties: -// statement: -// type: integer -// description: The statement index in the SQL input. -// tag: -// type: string -// description: The short statement tag. -// start: -// type: string -// description: Start timestamp, encoded as RFC3339. -// end: -// type: string -// description: End timestamp, encoded as RFC3339. -// rows_affected: -// type: integer -// description: The number of rows affected. -// columns: -// type: array -// description: The list of columns in the result rows. -// items: -// type: object -// properties: -// name: -// type: string -// description: The column name. -// type: -// type: string -// description: The SQL type of the column. -// oid: -// type: integer -// description: The PostgreSQL OID for the column type. -// required: -// - name -// - type -// - oid -// rows: -// type: array -// description: The result rows. -// items: {} +// +// '405': +// description: Bad method. Only the POST method is supported. +// '400': +// description: Bad request. Bad input encoding, missing SQL or invalid parameter. +// '500': +// description: Internal error encountered. +// '200': +// description: Query results and optional execution error. 
+// schema: +// type: object +// required: +// - num_statements +// - execution +// properties: +// num_statements: +// type: integer +// description: The number of statements in the input SQL. +// txn_error: +// type: object +// description: The details of the error, if an error was encountered. +// required: +// - message +// - code +// properties: +// code: +// type: string +// description: The SQLSTATE 5-character code of the error. +// message: +// type: string +// additionalProperties: {} +// execution: +// type: object +// required: +// - retries +// - txn_results +// properties: +// retries: +// type: integer +// description: The number of times the transaction was retried. +// txn_results: +// type: array +// description: The result sets, one per SQL statement. +// items: +// type: object +// required: +// - statement +// - tag +// - start +// - end +// properties: +// statement: +// type: integer +// description: The statement index in the SQL input. +// tag: +// type: string +// description: The short statement tag. +// start: +// type: string +// description: Start timestamp, encoded as RFC3339. +// end: +// type: string +// description: End timestamp, encoded as RFC3339. +// rows_affected: +// type: integer +// description: The number of rows affected. +// columns: +// type: array +// description: The list of columns in the result rows. +// items: +// type: object +// properties: +// name: +// type: string +// description: The column name. +// type: +// type: string +// description: The SQL type of the column. +// oid: +// type: integer +// description: The PostgreSQL OID for the column type. +// required: +// - name +// - type +// - oid +// rows: +// type: array +// description: The result rows. +// items: {} func (a *apiV2Server) execSQL(w http.ResponseWriter, r *http.Request) { // Type for the request. type requestType struct { diff --git a/pkg/server/api_v2_sql_schema.go b/pkg/server/api_v2_sql_schema.go index 034454a346f0..1e0042a2587a 100644 --- a/pkg/server/api_v2_sql_schema.go +++ b/pkg/server/api_v2_sql_schema.go @@ -34,29 +34,31 @@ type usersResponse struct { // swagger:operation GET /users/ listUsers // -// List users +// # List users // // List SQL users on this cluster. // // --- // parameters: -// - name: limit -// type: integer -// in: query -// description: Maximum number of results to return in this call. -// required: false -// - name: offset -// type: integer -// in: query -// description: Continuation token for results after a past limited run. -// required: false +// - name: limit +// type: integer +// in: query +// description: Maximum number of results to return in this call. +// required: false +// - name: offset +// type: integer +// in: query +// description: Continuation token for results after a past limited run. +// required: false +// // produces: // - application/json // responses: -// "200": -// description: Users response -// schema: -// "$ref": "#/definitions/usersResponse" +// +// "200": +// description: Users response +// schema: +// "$ref": "#/definitions/usersResponse" func (a *apiV2Server) listUsers(w http.ResponseWriter, r *http.Request) { limit, offset := getSimplePaginationValues(r) ctx := r.Context() @@ -112,35 +114,38 @@ type eventsResponse struct { // swagger:operation GET /events/ listEvents // -// List events +// # List events // // Lists the latest event log entries, in descending order. // // --- // parameters: -// - name: type -// type: string -// in: query -// description: Type of events to filter for (e.g. "create_table"). 
Only one +// - name: type +// type: string +// in: query +// description: Type of events to filter for (e.g. "create_table"). Only one // event type can be specified at a time. -// required: false -// - name: limit -// type: integer -// in: query -// description: Maximum number of results to return in this call. -// required: false -// - name: offset -// type: integer -// in: query -// description: Continuation token for results after a past limited run. -// required: false + +// required: false +// - name: limit +// type: integer +// in: query +// description: Maximum number of results to return in this call. +// required: false +// - name: offset +// type: integer +// in: query +// description: Continuation token for results after a past limited run. +// required: false +// // produces: // - application/json // responses: -// "200": -// description: Events response -// schema: -// "$ref": "#/definitions/eventsResponse" +// +// "200": +// description: Events response +// schema: +// "$ref": "#/definitions/eventsResponse" func (a *apiV2Server) listEvents(w http.ResponseWriter, r *http.Request) { limit, offset := getSimplePaginationValues(r) ctx := r.Context() @@ -180,29 +185,31 @@ type databasesResponse struct { // swagger:operation GET /databases/ listDatabases // -// List databases +// # List databases // // Lists all databases on this cluster. // // --- // parameters: -// - name: limit -// type: integer -// in: query -// description: Maximum number of results to return in this call. -// required: false -// - name: offset -// type: integer -// in: query -// description: Continuation token for results after a past limited run. -// required: false +// - name: limit +// type: integer +// in: query +// description: Maximum number of results to return in this call. +// required: false +// - name: offset +// type: integer +// in: query +// description: Continuation token for results after a past limited run. +// required: false +// // produces: // - application/json // responses: -// "200": -// description: Databases response -// schema: -// "$ref": "#/definitions/databasesResponse" +// +// "200": +// description: Databases response +// schema: +// "$ref": "#/definitions/databasesResponse" func (a *apiV2Server) listDatabases(w http.ResponseWriter, r *http.Request) { limit, offset := getSimplePaginationValues(r) ctx := r.Context() @@ -232,26 +239,28 @@ type databaseDetailsResponse struct { // swagger:operation GET /databases/{database}/ databaseDetails // -// Get database descriptor ID +// # Get database descriptor ID // // Returns the database's descriptor ID. // // --- // parameters: -// - name: database -// type: string -// in: path -// description: Name of database being looked up. -// required: true +// - name: database +// type: string +// in: path +// description: Name of database being looked up. 
+// required: true +// // produces: // - application/json // responses: -// "200": -// description: Database details response -// schema: -// "$ref": "#/definitions/databaseDetailsResponse" -// "404": -// description: Database not found +// +// "200": +// description: Database details response +// schema: +// "$ref": "#/definitions/databaseDetailsResponse" +// "404": +// description: Database not found func (a *apiV2Server) databaseDetails(w http.ResponseWriter, r *http.Request) { ctx := r.Context() username := getSQLUsername(ctx) @@ -292,37 +301,39 @@ type databaseGrantsResponse struct { // swagger:operation GET /databases/{database}/grants/ databaseGrants // -// Lists grants on a database +// # Lists grants on a database // // Returns grants on a database. Grants are the privileges granted to users // on this database. // // --- // parameters: -// - name: database -// type: string -// in: path -// description: Name of the database being looked up. -// required: true -// - name: limit -// type: integer -// in: query -// description: Maximum number of grants to return in this call. -// required: false -// - name: offset -// type: integer -// in: query -// description: Continuation token for results after a past limited run. -// required: false +// - name: database +// type: string +// in: path +// description: Name of the database being looked up. +// required: true +// - name: limit +// type: integer +// in: query +// description: Maximum number of grants to return in this call. +// required: false +// - name: offset +// type: integer +// in: query +// description: Continuation token for results after a past limited run. +// required: false +// // produces: // - application/json // responses: -// "200": -// description: Database grants response -// schema: -// "$ref": "#/definitions/databaseGrantsResponse" -// "404": -// description: Database not found +// +// "200": +// description: Database grants response +// schema: +// "$ref": "#/definitions/databaseGrantsResponse" +// "404": +// description: Database not found func (a *apiV2Server) databaseGrants(w http.ResponseWriter, r *http.Request) { ctx := r.Context() limit, offset := getSimplePaginationValues(r) @@ -365,37 +376,39 @@ type databaseTablesResponse struct { // swagger:operation GET /databases/{database}/tables/ databaseTables // -// Lists tables on a database +// # Lists tables on a database // // Lists names of all tables in the database. The names of all responses will // be schema-qualified. // // --- // parameters: -// - name: database -// type: string -// in: path -// description: Name of the database being looked up. -// required: true -// - name: limit -// type: integer -// in: query -// description: Maximum number of tables to return in this call. -// required: false -// - name: offset -// type: integer -// in: query -// description: Continuation token for results after a past limited run. -// required: false +// - name: database +// type: string +// in: path +// description: Name of the database being looked up. +// required: true +// - name: limit +// type: integer +// in: query +// description: Maximum number of tables to return in this call. +// required: false +// - name: offset +// type: integer +// in: query +// description: Continuation token for results after a past limited run. 
+// required: false +// // produces: // - application/json // responses: -// "200": -// description: Database tables response -// schema: -// "$ref": "#/definitions/databaseTablesResponse" -// "404": -// description: Database not found +// +// "200": +// description: Database tables response +// schema: +// "$ref": "#/definitions/databaseTablesResponse" +// "404": +// description: Database not found func (a *apiV2Server) databaseTables(w http.ResponseWriter, r *http.Request) { ctx := r.Context() limit, offset := getSimplePaginationValues(r) @@ -428,34 +441,36 @@ type tableDetailsResponse serverpb.TableDetailsResponse // swagger:operation GET /databases/{database}/tables/{table}/ tableDetails // -// Get table details +// # Get table details // // Returns details about a table. // // --- // parameters: -// - name: database -// type: string -// in: path -// description: Name of the database being looked up. -// required: true -// - name: table -// type: string -// in: path -// description: Name of table being looked up. Table may be -// schema-qualified (schema.table) and each name component that contains -// sql unsafe characters such as . or uppercase letters must be surrounded -// in double quotes like "naughty schema".table. -// required: true +// - name: database +// type: string +// in: path +// description: Name of the database being looked up. +// required: true +// - name: table +// type: string +// in: path +// description: Name of table being looked up. Table may be +// schema-qualified (schema.table) and each name component that contains +// sql unsafe characters such as . or uppercase letters must be surrounded +// in double quotes like "naughty schema".table. +// required: true +// // produces: // - application/json // responses: -// "200": -// description: Database details response -// schema: -// "$ref": "#/definitions/tableDetailsResponse" -// "404": -// description: Database or table not found +// +// "200": +// description: Database details response +// schema: +// "$ref": "#/definitions/tableDetailsResponse" +// "404": +// description: Database or table not found func (a *apiV2Server) tableDetails(w http.ResponseWriter, r *http.Request) { ctx := r.Context() username := getSQLUsername(ctx) diff --git a/pkg/server/auto_tls_init.go b/pkg/server/auto_tls_init.go index 408666c622af..143aaa9ecc9e 100644 --- a/pkg/server/auto_tls_init.go +++ b/pkg/server/auto_tls_init.go @@ -96,14 +96,14 @@ func (sb *ServiceCertificateBundle) loadCACertAndKey(certPath string, keyPath st // loadOrCreateServiceCertificates will attempt to load the service cert/key // into the service bundle. -// * If they do not exist: -// It will attempt to load the service CA cert/key pair. -// * If they do not exist: +// - If they do not exist: +// It will attempt to load the service CA cert/key pair. +// - If they do not exist: // It will generate the service CA cert/key pair. // It will persist these to disk and store them -// in the ServiceCertificateBundle. -// It will generate the service cert/key pair. -// It will persist these to disk and store them +// in the ServiceCertificateBundle. +// It will generate the service cert/key pair. +// It will persist these to disk and store them // in the ServiceCertificateBundle. 
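The branching described above is essentially a load-or-create pattern applied twice: once for the service CA pair and once for the service cert/key pair signed by it. A minimal standard-library sketch of that pattern (a simplified stand-in, not the actual ServiceCertificateBundle helpers) might look like:

	// loadOrCreate returns the bytes at path, generating and persisting
	// them first if the file does not exist yet. Assumes imports of
	// "errors", "io/fs" and "os".
	func loadOrCreate(path string, generate func() []byte) ([]byte, error) {
		b, err := os.ReadFile(path)
		if err == nil {
			return b, nil // already on disk
		}
		if !errors.Is(err, fs.ErrNotExist) {
			return nil, err // unexpected I/O error
		}
		b = generate()
		if err := os.WriteFile(path, b, 0600); err != nil {
			return nil, err
		}
		return b, nil // generated and persisted
	}
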
func (sb *ServiceCertificateBundle) loadOrCreateServiceCertificates( ctx context.Context, diff --git a/pkg/server/config.go b/pkg/server/config.go index f46dcd7b1698..c23b4804badb 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -459,13 +459,13 @@ func MakeSQLConfig(tenID roachpb.TenantID, tempStorageCfg base.TempStorageConfig // limit if needed. Returns an error if the hard limit is too low. Returns the // value to set maxOpenFiles to for each store. // -// Minimum - 1700 per store, 256 saved for networking +// # Minimum - 1700 per store, 256 saved for networking // -// Constrained - 256 saved for networking, rest divided evenly per store +// # Constrained - 256 saved for networking, rest divided evenly per store // -// Constrained (network only) - 10000 per store, rest saved for networking +// # Constrained (network only) - 10000 per store, rest saved for networking // -// Recommended - 10000 per store, 5000 for network +// # Recommended - 10000 per store, 5000 for network // // Please note that current and max limits are commonly referred to as the soft // and hard limits respectively. @@ -545,6 +545,7 @@ type Engines []storage.Engine // Close closes all the Engines. // This method has a pointer receiver so that the following pattern works: +// // func f() { // engines := Engines(engineSlice) // defer engines.Close() // make sure the engines are Closed if this diff --git a/pkg/server/debug/goroutineui/dump.go b/pkg/server/debug/goroutineui/dump.go index 906d32d6a54d..f6fa40defee2 100644 --- a/pkg/server/debug/goroutineui/dump.go +++ b/pkg/server/debug/goroutineui/dump.go @@ -13,7 +13,6 @@ package goroutineui import ( "bytes" "io" - "io/ioutil" "runtime" "sort" "strings" @@ -49,7 +48,7 @@ func NewDump() Dump { // newDumpFromBytes is like NewDump, but treats the supplied bytes as a goroutine // dump. The function accepts the options to pass to panicparse/stack.ScanSnapshot. func newDumpFromBytes(b []byte, opts *stack.Opts) Dump { - s, _, err := stack.ScanSnapshot(bytes.NewBuffer(b), ioutil.Discard, opts) + s, _, err := stack.ScanSnapshot(bytes.NewBuffer(b), io.Discard, opts) if err != io.EOF { return Dump{err: err} } diff --git a/pkg/server/pagination.go b/pkg/server/pagination.go index ff0313a49997..7bb3689e65a0 100644 --- a/pkg/server/pagination.go +++ b/pkg/server/pagination.go @@ -174,13 +174,13 @@ func (p *paginationState) paginate( // ||| // // Where: -// - nodesQueried is a comma-separated list of node IDs that have already been -// queried (matching p.nodesQueried). -// - inProgressNode is the ID of the node where the cursor is currently at. -// - inProgressNodeIndex is the index of the response from inProgressNode's -// node-local function where the cursor is currently at. -// - nodesToQuery is a comma-separated list of node IDs of nodes that are yet -// to be queried. +// - nodesQueried is a comma-separated list of node IDs that have already been +// queried (matching p.nodesQueried). +// - inProgressNode is the ID of the node where the cursor is currently at. +// - inProgressNodeIndex is the index of the response from inProgressNode's +// node-local function where the cursor is currently at. +// - nodesToQuery is a comma-separated list of node IDs of nodes that are yet +// to be queried. // // All node IDs and indices are represented as unsigned 32-bit ints, and // comma-separated lists are allowed to have trailing commas. 
The character diff --git a/pkg/server/pagination_test.go b/pkg/server/pagination_test.go index 5286246a1c30..99c92c6ee1c2 100644 --- a/pkg/server/pagination_test.go +++ b/pkg/server/pagination_test.go @@ -39,12 +39,13 @@ import ( // // Calls paginate(). // input args: -// - limit: max number of elements to return. -// - offset: index offset since the start of slice. -// - input: comma-separated list of ints used as input to simplePaginate. +// - limit: max number of elements to return. +// - offset: index offset since the start of slice. +// - input: comma-separated list of ints used as input to simplePaginate. +// // output args: -// - result: the sub-sliced input returned from simplePaginate. -// - next: the next offset. +// - result: the sub-sliced input returned from simplePaginate. +// - next: the next offset. func TestSimplePaginate(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -99,14 +100,14 @@ func TestSimplePaginate(t *testing.T) { // // Resets and defines a new paginationState. // input args: -// - queried: list of queried nodeIDs, comma-separated -// - in-progress: node ID of current cursor position's node -// - in-progress-index: index of current cursor position within current node's -// response -// - to-query: list of node IDs yet to query, comma-separated -// output args: -// - printed-state: textual representation of current pagination state. +// - queried: list of queried nodeIDs, comma-separated +// - in-progress: node ID of current cursor position's node +// - in-progress-index: index of current cursor position within current node's +// response +// - to-query: list of node IDs yet to query, comma-separated // +// output args: +// - printed-state: textual representation of current pagination state. // // merge-node-ids // @@ -115,10 +116,10 @@ func TestSimplePaginate(t *testing.T) { // // Calls mergeNodeIDs(). // input args: -// - nodes: sorted node IDs to merge into pagination state, using mergeNodeIDs. -// output args: -// - printed-state: textual representation of current pagination state. +// - nodes: sorted node IDs to merge into pagination state, using mergeNodeIDs. // +// output args: +// - printed-state: textual representation of current pagination state. // // paginate // limit= @@ -132,16 +133,16 @@ func TestSimplePaginate(t *testing.T) { // // Calls paginate() // input args: -// - limit: Max objects to return from paginate(). -// - nodeID: ID of node the response is coming from. -// - length: length of values in current node's response. -// output args: -// - start: Start idx of response slice. -// - end: End idx of response slice. -// - newLimit: Limit to be used on next call to paginate(), if current slice -// doesn't have `limit` remaining items. 0 if `limit` was reached. -// - printed-state: textual representation of current pagination state. +// - limit: Max objects to return from paginate(). +// - nodeID: ID of node the response is coming from. +// - length: length of values in current node's response. // +// output args: +// - start: Start idx of response slice. +// - end: End idx of response slice. +// - newLimit: Limit to be used on next call to paginate(), if current slice +// doesn't have `limit` remaining items. 0 if `limit` was reached. +// - printed-state: textual representation of current pagination state. // // unmarshal // @@ -150,10 +151,10 @@ func TestSimplePaginate(t *testing.T) { // // Unmarshals base64-encoded string into a paginationState. Opposite of marshal. 
// input args: -// - input: base64-encoded string to unmarshal. -// output args: -// - printed-state: textual representation of unmarshalled pagination state. +// - input: base64-encoded string to unmarshal. // +// output args: +// - printed-state: textual representation of unmarshalled pagination state. // // marshal // ---- @@ -161,7 +162,7 @@ func TestSimplePaginate(t *testing.T) { // // Marshals current state to base64-encoded string. // output args: -// - text: base64-encoded string that can be passed to unmarshal. +// - text: base64-encoded string that can be passed to unmarshal. func TestPaginationState(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/server/server.go b/pkg/server/server.go index 3f24182238d2..4a752c5f23d0 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -1010,11 +1010,11 @@ func (s *Server) Start(ctx context.Context) error { // underinitialized services. This is avoided with some additional // complexity that can be summarized as follows: // -// - before blocking trying to connect to the Gossip network, we already open -// the admin UI (so that its diagnostics are available) -// - we also allow our Gossip and our connection health Ping service -// - everything else returns Unavailable errors (which are retryable) -// - once the node has started, unlock all RPCs. +// - before blocking trying to connect to the Gossip network, we already open +// the admin UI (so that its diagnostics are available) +// - we also allow our Gossip and our connection health Ping service +// - everything else returns Unavailable errors (which are retryable) +// - once the node has started, unlock all RPCs. // // The passed context can be used to trace the server startup. The context // should represent the general startup operation. diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index ba4a9a66e09d..1ab96b98157b 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -15,7 +15,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math" "net/http" "net/http/httptest" @@ -266,7 +265,7 @@ func TestPlainHTTPServer(t *testing.T) { } else { func() { defer resp.Body.Close() - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err := io.Copy(io.Discard, resp.Body); err != nil { t.Error(err) } }() diff --git a/pkg/server/status.go b/pkg/server/status.go index 7a318dcf8b3f..bc02f447ec93 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -1103,10 +1103,11 @@ func checkFilePattern(pattern string) error { // the log configuration. For example, consider the following config: // // file-groups: -// groupA: -// dir: dir1 -// groupB: -// dir: dir2 +// +// groupA: +// dir: dir1 +// groupB: +// dir: dir2 // // The result of ListLogFiles on this config will return the list // {cockroach-groupA.XXX.log, cockroach-groupB.XXX.log}, without @@ -1223,14 +1224,15 @@ func parseInt64WithDefault(s string, defaultValue int64) (int64, error) { // Logs returns the log entries parsed from the log files stored on // the server. Log entries are returned in reverse chronological order. The // following options are available: -// * "starttime" query parameter filters the log entries to only ones that -// occurred on or after the "starttime". Defaults to a day ago. -// * "endtime" query parameter filters the log entries to only ones that -// occurred before on on the "endtime". Defaults to the current time. 
-// * "pattern" query parameter filters the log entries by the provided regexp -// pattern if it exists. Defaults to nil. -// * "max" query parameter is the hard limit of the number of returned log -// entries. Defaults to defaultMaxLogEntries. +// - "starttime" query parameter filters the log entries to only ones that +// occurred on or after the "starttime". Defaults to a day ago. +// - "endtime" query parameter filters the log entries to only ones that +// occurred before on on the "endtime". Defaults to the current time. +// - "pattern" query parameter filters the log entries by the provided regexp +// pattern if it exists. Defaults to nil. +// - "max" query parameter is the hard limit of the number of returned log +// entries. Defaults to defaultMaxLogEntries. +// // To filter the log messages to only retrieve messages from a given level, // use a pattern that excludes all messages at the undesired levels. // (e.g. "^[^IW]" to only get errors, fatals and panics). An exclusive diff --git a/pkg/server/status/recorder_test.go b/pkg/server/status/recorder_test.go index c03a25734393..25f27021437d 100644 --- a/pkg/server/status/recorder_test.go +++ b/pkg/server/status/recorder_test.go @@ -12,7 +12,7 @@ package status import ( "context" - "io/ioutil" + "io" "os" "reflect" "sort" @@ -377,7 +377,7 @@ func TestMetricsRecorder(t *testing.T) { if _, err := recorder.MarshalJSON(); err != nil { t.Error(err) } - _ = recorder.PrintAsText(ioutil.Discard) + _ = recorder.PrintAsText(io.Discard) _ = recorder.GetTimeSeriesData() wg.Done() }() diff --git a/pkg/server/telemetry/doc.go b/pkg/server/telemetry/doc.go index 4219ff255620..9602d3ebeec3 100644 --- a/pkg/server/telemetry/doc.go +++ b/pkg/server/telemetry/doc.go @@ -28,9 +28,9 @@ existing diagnostics reporting if enabled. Some notes on using these: - "some.feature" should always be a literal string constant -- it must not include any user-submitted data. - Contention-sensitive, high-volume callers should use an initial `GetCounter` - to get a Counter they can then `Inc` repeatedly instead to avoid contention - and map lookup over around the name resolution on each increment. - - When naming a counter, by convention we use dot-separated, dashed names, eg. - `feature-area.specific-feature`. + to get a Counter they can then `Inc` repeatedly instead to avoid contention + and map lookup over around the name resolution on each increment. + - When naming a counter, by convention we use dot-separated, dashed names, eg. + `feature-area.specific-feature`. */ package telemetry diff --git a/pkg/server/tenantsettingswatcher/watcher.go b/pkg/server/tenantsettingswatcher/watcher.go index 6799331ab377..5fa84f73ac00 100644 --- a/pkg/server/tenantsettingswatcher/watcher.go +++ b/pkg/server/tenantsettingswatcher/watcher.go @@ -32,20 +32,20 @@ import ( // // Sample usage: // -// w := tenantsettingswatcher.New(...) -// if err := w.Start(ctx); err != nil { ... } +// w := tenantsettingswatcher.New(...) +// if err := w.Start(ctx); err != nil { ... } // -// // Get overrides and keep them up to date. -// all, allCh := w.AllTenantOverrides() -// tenant, tenantCh := w.TenantOverrides(tenantID) -// select { -// case <-allCh: -// all, allCh = w.AllTenantOverrides() -// case <-tenantCh: -// tenant, tenantCh = w.TenantOverrides(tenantID) -// case <-ctx.Done(): -// ... -// } +// // Get overrides and keep them up to date. 
+// all, allCh := w.AllTenantOverrides() +// tenant, tenantCh := w.TenantOverrides(tenantID) +// select { +// case <-allCh: +// all, allCh = w.AllTenantOverrides() +// case <-tenantCh: +// tenant, tenantCh = w.TenantOverrides(tenantID) +// case <-ctx.Done(): +// ... +// } type Watcher struct { clock *hlc.Clock f *rangefeed.Factory diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 1776d9145d4c..30cc223ab7fc 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -303,12 +303,11 @@ func makeTestConfigFromParams(params base.TestServerArgs) Config { // // Example usage of a TestServer: // -// s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) -// defer s.Stopper().Stop() -// // If really needed, in tests that can depend on server, downcast to -// // server.TestServer: -// ts := s.(*server.TestServer) -// +// s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) +// defer s.Stopper().Stop() +// // If really needed, in tests that can depend on server, downcast to +// // server.TestServer: +// ts := s.(*server.TestServer) type TestServer struct { Cfg *Config params base.TestServerArgs diff --git a/pkg/settings/doc.go b/pkg/settings/doc.go index d33e4bcaa4f8..33b39f8a9f29 100644 --- a/pkg/settings/doc.go +++ b/pkg/settings/doc.go @@ -27,10 +27,12 @@ setting is to be used. For example, to add an "enterprise" flag, adding into license_check.go: var enterpriseEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "enterprise.enabled", "some doc for the setting", false, + ) Then use with `if enterpriseEnabled.Get() ...` diff --git a/pkg/settings/setting.go b/pkg/settings/setting.go index 0bfef5bb5f56..f44b0ba0723e 100644 --- a/pkg/settings/setting.go +++ b/pkg/settings/setting.go @@ -84,19 +84,20 @@ type NonMaskedSetting interface { // SystemOnly.RegisterIntSetting(). // // Guidelines for choosing a class: -// - Make sure to read the descriptions below carefully to understand the -// differences in semantics. // -// - If the setting controls a user-visible aspect of SQL, it should be a -// TenantWritable setting. +// - Make sure to read the descriptions below carefully to understand the +// differences in semantics. // -// - Control settings relevant to tenant-specific internal implementation -// should be TenantReadOnly. +// - If the setting controls a user-visible aspect of SQL, it should be a +// TenantWritable setting. // -// - When in doubt, the first choice to consider should be TenantReadOnly. +// - Control settings relevant to tenant-specific internal implementation +// should be TenantReadOnly. // -// - SystemOnly should be used with caution: even internal tenant code is -// disallowed from using these settings at all. +// - When in doubt, the first choice to consider should be TenantReadOnly. +// +// - SystemOnly should be used with caution: even internal tenant code is +// disallowed from using these settings at all. type Class int8 const ( diff --git a/pkg/spanconfig/spanconfig.go b/pkg/spanconfig/spanconfig.go index b59c558c8d81..955154c59552 100644 --- a/pkg/spanconfig/spanconfig.go +++ b/pkg/spanconfig/spanconfig.go @@ -84,15 +84,20 @@ type KVAccessor interface { // for the span[3]. // // [1]: The contents of the StoreReader and ProtectedTSReader at t1 corresponds -// exactly to the contents of the global span configuration state at t0 -// where t0 <= t1. If the StoreReader or ProtectedTSReader is read from at -// t2 where t2 > t1, it's guaranteed to observe a view of the global state -// at t >= t0. 
+// +// exactly to the contents of the global span configuration state at t0 +// where t0 <= t1. If the StoreReader or ProtectedTSReader is read from at +// t2 where t2 > t1, it's guaranteed to observe a view of the global state +// at t >= t0. +// // [2]: For the canonical KVSubscriber implementation, this is typically lagging -// by the closed timestamp target duration. +// +// by the closed timestamp target duration. +// // [3]: The canonical KVSubscriber implementation is bounced whenever errors -// occur, which may result in the re-transmission of earlier updates -// (typically through a coarsely targeted [min,max) span). +// +// occur, which may result in the re-transmission of earlier updates +// (typically through a coarsely targeted [min,max) span). type KVSubscriber interface { StoreReader ProtectedTSReader @@ -106,15 +111,15 @@ type KVSubscriber interface { // // Concretely, for the following zone configuration hierarchy: // -// CREATE DATABASE db; -// CREATE TABLE db.t1(); -// ALTER DATABASE db CONFIGURE ZONE USING num_replicas=7; -// ALTER TABLE db.t1 CONFIGURE ZONE USING num_voters=5; +// CREATE DATABASE db; +// CREATE TABLE db.t1(); +// ALTER DATABASE db CONFIGURE ZONE USING num_replicas=7; +// ALTER TABLE db.t1 CONFIGURE ZONE USING num_voters=5; // // The SQLTranslator produces the following translation (represented as a diff // against RANGE DEFAULT for brevity): // -// Table/5{3-4} num_replicas=7 num_voters=5 +// Table/5{3-4} num_replicas=7 num_voters=5 type SQLTranslator interface { // Translate generates the span configuration state given a list of // {descriptor, named zone} IDs. Entries are unique, and are omitted for IDs @@ -286,28 +291,27 @@ type Limiter interface { // indexes, partitions and sub-partitions) and figures out the actual key // boundaries that we may need to split over. 
For example: // -// CREATE TABLE db.parts(i INT PRIMARY KEY, j INT) PARTITION BY LIST (i) ( -// PARTITION one_and_five VALUES IN (1, 5), -// PARTITION four_and_three VALUES IN (4, 3), -// PARTITION everything_else VALUES IN (6, default) -// ); +// CREATE TABLE db.parts(i INT PRIMARY KEY, j INT) PARTITION BY LIST (i) ( +// PARTITION one_and_five VALUES IN (1, 5), +// PARTITION four_and_three VALUES IN (4, 3), +// PARTITION everything_else VALUES IN (6, default) +// ); // // We'd spit out 15: // -// + 1 between start of table and start of 1st index -// + 1 between start of index and start of 1st partition-by-list value -// + 1 for 1st partition-by-list value -// + 1 for 2nd partition-by-list value -// + 1 for 3rd partition-by-list value -// + 1 for 4th partition-by-list value -// + 1 for 5th partition-by-list value -// + 1 for 6th partition-by-list value -// + 5 gap(s) between 6 partition-by-list value spans -// + 1 between end of 6th partition-by-list value span and end of index -// + 13 for 1st index -// + 1 between end of 1st index and end of table -// = 15 -// +// - 1 between start of table and start of 1st index +// - 1 between start of index and start of 1st partition-by-list value +// - 1 for 1st partition-by-list value +// - 1 for 2nd partition-by-list value +// - 1 for 3rd partition-by-list value +// - 1 for 4th partition-by-list value +// - 1 for 5th partition-by-list value +// - 1 for 6th partition-by-list value +// - 5 gap(s) between 6 partition-by-list value spans +// - 1 between end of 6th partition-by-list value span and end of index +// - 13 for 1st index +// - 1 between end of 1st index and end of table +// = 15 type Splitter interface { Splits(ctx context.Context, table catalog.TableDescriptor) (int, error) } diff --git a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go index 6be31e99692a..cd7971aa8602 100644 --- a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go +++ b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go @@ -37,28 +37,28 @@ import ( // TestDataDriven runs datadriven tests against the kvaccessor interface. // The syntax is as follows: // -// kvaccessor-get -// span [a,e) -// span [a,b) -// span [b,c) -// system-target {cluster} -// system-target {source=1,target=20} -// system-target {source=1,target=1} -// system-target {source=20,target=20} -// system-target {source=1, all-tenant-keyspace-targets-set} -// ---- +// kvaccessor-get +// span [a,e) +// span [a,b) +// span [b,c) +// system-target {cluster} +// system-target {source=1,target=20} +// system-target {source=1,target=1} +// system-target {source=20,target=20} +// system-target {source=1, all-tenant-keyspace-targets-set} +// ---- // -// kvaccessor-update -// delete [c,e) -// upsert [c,d):C -// upsert [d,e):D -// delete {source=1,target=1} -// upsert {source=1,target=1}:A -// upsert {cluster}:F -// ---- +// kvaccessor-update +// delete [c,e) +// upsert [c,d):C +// upsert [d,e):D +// delete {source=1,target=1} +// upsert {source=1,target=1}:A +// upsert {cluster}:F +// ---- // -// kvaccessor-get-all-system-span-configs-that-apply tenant-id= -// ---- +// kvaccessor-get-all-system-span-configs-that-apply tenant-id= +// ---- // // They tie into GetSpanConfigRecords and UpdateSpanConfigRecords // respectively. 
For kvaccessor-get, each listed target is added to the set of diff --git a/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go b/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go index 642d367aa73a..f2abd08593ab 100644 --- a/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go +++ b/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go @@ -41,60 +41,60 @@ import ( // TestDataDriven runs datadriven tests against the KVSubscriber interface. // The syntax is as follows: // -// update -// delete [c,e) -// upsert [c,d):C -// upsert [d,e):D -// upsert {entire-keyspace}:X -// delete {source=1,target=20} -// ---- +// update +// delete [c,e) +// upsert [c,d):C +// upsert [d,e):D +// upsert {entire-keyspace}:X +// delete {source=1,target=20} +// ---- // -// get -// span [a,b) -// span [b,c) -// ---- -// [a,b):A -// [b,d):B +// get +// span [a,b) +// span [b,c) +// ---- +// [a,b):A +// [b,d):B // -// start -// ---- +// start +// ---- // -// updates -// ---- -// [a,b) -// [b,d) -// [e,f) +// updates +// ---- +// [a,b) +// [b,d) +// [e,f) // -// store-reader key=b -// ---- -// [b,d):B +// store-reader key=b +// ---- +// [b,d):B // -// store-reader compute-split=[a,c) -// ---- -// b +// store-reader compute-split=[a,c) +// ---- +// b // -// store-reader needs-split=[b,h) -// ---- -// true +// store-reader needs-split=[b,h) +// ---- +// true // -// inject-buffer-overflow -// ---- -// ok +// inject-buffer-overflow +// ---- +// ok // -// - update and get tie into GetSpanConfigRecords and UpdateSpanConfigRecords -// respectively on the KVAccessor interface, and are a convenient shorthand to -// populate the system table that the KVSubscriber subscribes to. The input is -// processed in a single batch. -// - start starts the subscription process. It can also be used to verify -// behavior when re-establishing subscriptions after hard errors. -// - updates lists the span updates the KVSubscriber receives, in the listed -// order. Updates in a batch are de-duped. -// - store-reader {key,compute-split,needs-split} relate to GetSpanConfigForKey, -// ComputeSplitKey and NeedsSplit respectively on the StoreReader subset of the -// KVSubscriber interface. -// - inject-buffer-overflow can be used to inject rangefeed buffer overflow -// errors within the kvsubscriber. It pokes into the internals of the -// kvsubscriber and is useful to test teardown and recovery behavior. +// - update and get tie into GetSpanConfigRecords and UpdateSpanConfigRecords +// respectively on the KVAccessor interface, and are a convenient shorthand to +// populate the system table that the KVSubscriber subscribes to. The input is +// processed in a single batch. +// - start starts the subscription process. It can also be used to verify +// behavior when re-establishing subscriptions after hard errors. +// - updates lists the span updates the KVSubscriber receives, in the listed +// order. Updates in a batch are de-duped. +// - store-reader {key,compute-split,needs-split} relate to GetSpanConfigForKey, +// ComputeSplitKey and NeedsSplit respectively on the StoreReader subset of the +// KVSubscriber interface. +// - inject-buffer-overflow can be used to inject rangefeed buffer overflow +// errors within the kvsubscriber. It pokes into the internals of the +// kvsubscriber and is useful to test teardown and recovery behavior. 
// // Text of the form [a,b) and [a,b):C correspond to spans and span config // records; see spanconfigtestutils.Parse{Span,Config,SpanConfigRecord} for more diff --git a/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go b/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go index dca3612b17f6..35b904ac5b3d 100644 --- a/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go +++ b/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go @@ -73,19 +73,24 @@ var updateBehindNanos = metric.Metadata{ // we could diff the two data structures and only emit targeted updates. // // [1]: For a given key k, it's config may be stored as part of a larger span S -// (where S.start <= k < S.end). It's possible for S to get deleted and -// replaced with sub-spans S1...SN in the same transaction if the span is -// getting split. When applying these updates, we need to make sure to -// process the deletion event for S before processing S1...SN. +// +// (where S.start <= k < S.end). It's possible for S to get deleted and +// replaced with sub-spans S1...SN in the same transaction if the span is +// getting split. When applying these updates, we need to make sure to +// process the deletion event for S before processing S1...SN. +// // [2]: In our example above deleting the config for S and adding configs for -// S1...SN we want to make sure that we apply the full set of updates all -// at once -- lest we expose the intermediate state where the config for S -// was deleted but the configs for S1...SN were not yet applied. +// +// S1...SN we want to make sure that we apply the full set of updates all +// at once -- lest we expose the intermediate state where the config for S +// was deleted but the configs for S1...SN were not yet applied. +// // [3]: TODO(irfansharif): When tearing down the subscriber due to underlying -// errors, we could also capture a checkpoint to use the next time the -// subscriber is established. That way we can avoid the full initial scan -// over the span configuration state and simply pick up where we left off -// with our existing spanconfig.Store. +// +// errors, we could also capture a checkpoint to use the next time the +// subscriber is established. That way we can avoid the full initial scan +// over the span configuration state and simply pick up where we left off +// with our existing spanconfig.Store. type KVSubscriber struct { fallback roachpb.SpanConfig knobs *spanconfig.TestingKnobs @@ -192,13 +197,14 @@ func New( // invoked in the single async task thread. // // [1]: It's possible for retryable errors to occur internally, at which point -// we tear down the existing subscription and re-establish another. When -// unsubscribed, the exposed spanconfig.StoreReader continues to be -// readable (though no longer incrementally maintained -- the view gets -// progressively staler overtime). Existing handlers are kept intact and -// notified when the subscription is re-established. After re-subscribing, -// the exported StoreReader will be up-to-date and continue to be -// incrementally maintained. +// +// we tear down the existing subscription and re-establish another. When +// unsubscribed, the exposed spanconfig.StoreReader continues to be +// readable (though no longer incrementally maintained -- the view gets +// progressively staler overtime). Existing handlers are kept intact and +// notified when the subscription is re-established. After re-subscribing, +// the exported StoreReader will be up-to-date and continue to be +// incrementally maintained. 
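A minimal usage sketch of the semantics described above; Start matches the signature that follows, while the constructor arguments and the handler signature are assumptions for illustration only:

	s := spanconfigkvsubscriber.New(...) // constructor arguments elided
	// Handlers survive internal re-subscriptions and are re-notified once
	// the subscription is re-established (assumed handler signature).
	s.Subscribe(func(ctx context.Context, updated roachpb.Span) {
		// React to span config changes over the updated span.
	})
	if err := s.Start(ctx, stopper); err != nil {
		return err
	}
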
func (s *KVSubscriber) Start(ctx context.Context, stopper *stop.Stopper) error { return rangefeedcache.Start(ctx, stopper, s.rfc, nil /* onError */) } diff --git a/pkg/spanconfig/spanconfigmanager/manager.go b/pkg/spanconfig/spanconfigmanager/manager.go index 130d77c09d87..39ea03f103e9 100644 --- a/pkg/spanconfig/spanconfigmanager/manager.go +++ b/pkg/spanconfig/spanconfigmanager/manager.go @@ -57,7 +57,8 @@ var jobEnabledSetting = settings.RegisterBoolSetting( // captures all relevant dependencies for the job. // // [1]: The reconciliation job is responsible for reconciling a tenant's zone -// configurations with the clusters span configurations. +// +// configurations with the clusters span configurations. type Manager struct { db *kv.DB jr *jobs.Registry diff --git a/pkg/spanconfig/spanconfigptsreader/adapter.go b/pkg/spanconfig/spanconfigptsreader/adapter.go index 80129e30493e..f6f9002fe258 100644 --- a/pkg/spanconfig/spanconfigptsreader/adapter.go +++ b/pkg/spanconfig/spanconfigptsreader/adapter.go @@ -39,7 +39,8 @@ import ( // interface. // // TODO(arul): In 22.2, we would have completely migrated away from the old -// subsystem, and we'd be able to get rid of this interface. +// +// subsystem, and we'd be able to get rid of this interface. type adapter struct { cache protectedts.Cache kvSubscriber spanconfig.KVSubscriber diff --git a/pkg/spanconfig/spanconfigreconciler/reconciler.go b/pkg/spanconfig/spanconfigreconciler/reconciler.go index 20d00874cad6..8b994665d762 100644 --- a/pkg/spanconfig/spanconfigreconciler/reconciler.go +++ b/pkg/spanconfig/spanconfigreconciler/reconciler.go @@ -119,12 +119,13 @@ func New( // entries we can then simply issue deletes for. // // [1]: #73399 proposes a new KV request type that would let us more rapidly -// trigger reconciliation after a tenant's SQL transaction. If we're able -// to do this fast enough, it would be reasonable to wait for -// reconciliation to happen before returning to the client. We could -// alternatively use it as part of a handshake protocol during pod -// suspension, to ensure that all outstanding work ("reconciliation" has -// been done before a pod is suspended. +// +// trigger reconciliation after a tenant's SQL transaction. If we're able +// to do this fast enough, it would be reasonable to wait for +// reconciliation to happen before returning to the client. We could +// alternatively use it as part of a handshake protocol during pod +// suspension, to ensure that all outstanding work ("reconciliation" has +// been done before a pod is suspended. // // TODO(irfansharif): The descriptions above presume holding the entire set of // span configs in memory, but we could break away from that by adding @@ -442,10 +443,10 @@ type incrementalReconciler struct { } // reconcile runs the incremental reconciliation process. It takes in: -// - the timestamp to start the incremental process from (typically a timestamp -// we've already reconciled up until); -// - a callback that it invokes periodically with timestamps that it's -// reconciled up until. +// - the timestamp to start the incremental process from (typically a timestamp +// we've already reconciled up until); +// - a callback that it invokes periodically with timestamps that it's +// reconciled up until. 
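In terms of the signature that follows, a caller supplies the timestamp of its last checkpoint and records each new checkpoint from the callback; the checkpoint bookkeeping shown here is illustrative:

	err := r.reconcile(ctx, lastCheckpoint, func(ts hlc.Timestamp) error {
		// Everything up to ts has been reconciled; remember it so a later
		// run can resume from here (persistence mechanism not shown).
		lastCheckpoint = ts
		return nil
	})
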
func (r *incrementalReconciler) reconcile( ctx context.Context, startTS hlc.Timestamp, callback func(reconciledUpUntil hlc.Timestamp) error, ) error { @@ -623,7 +624,8 @@ func (r *incrementalReconciler) filterForMissingProtectedTimestampSystemTargets( // they've been GC-ed away[1]. // // [1]: Or if the ExcludeDroppedDescriptorsFromLookup testing knob is used, -// this includes dropped descriptors. +// +// this includes dropped descriptors. func (r *incrementalReconciler) filterForMissingTableIDs( ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, updates []spanconfig.SQLUpdate, ) (descpb.IDs, error) { diff --git a/pkg/spanconfig/spanconfigsplitter/splitter.go b/pkg/spanconfig/spanconfigsplitter/splitter.go index 97d58327d640..ba5ae5516437 100644 --- a/pkg/spanconfig/spanconfigsplitter/splitter.go +++ b/pkg/spanconfig/spanconfigsplitter/splitter.go @@ -47,7 +47,7 @@ func New(codec keys.SQLCodec, knobs *spanconfig.TestingKnobs) *Splitter { // split points for the given table descriptor. It's able to do this without // decoding partition keys. Consider our table hierarchy: // -// table -> index -> partition -> partition -> (...) +// table -> index -> partition -> partition -> (...) // // Where each partition is either a PARTITION BY LIST kind (where it can then be // further partitioned), or a PARTITION BY RANGE kind (no further partitioning @@ -56,43 +56,42 @@ func New(codec keys.SQLCodec, knobs *spanconfig.TestingKnobs) *Splitter { // (a) Contiguous {index, list partition} -> range partition // (b) Non-contiguous table -> index, {index, list partition} -> list partition // -// - Contiguous links are the sort where each child span is contiguous with -// another, and that the set of all child spans encompass the parent's span. -// For an index that's partitioned by range: +// - Contiguous links are the sort where each child span is contiguous with +// another, and that the set of all child spans encompass the parent's span. +// For an index that's partitioned by range: // -// CREATE TABLE db.range(i INT PRIMARY KEY, j INT) PARTITION BY RANGE (i) ( -// PARTITION less_than_five VALUES FROM (minvalue) to (5), -// PARTITION between_five_and_ten VALUES FROM (5) to (10), -// PARTITION greater_than_ten VALUES FROM (10) to (maxvalue) -// ); +// CREATE TABLE db.range(i INT PRIMARY KEY, j INT) PARTITION BY RANGE (i) ( +// PARTITION less_than_five VALUES FROM (minvalue) to (5), +// PARTITION between_five_and_ten VALUES FROM (5) to (10), +// PARTITION greater_than_ten VALUES FROM (10) to (maxvalue) +// ); // -// With table ID as 106, the parent index span is /Table/106/{1-2}. The child -// spans are /Table/106/1{-/5}, /Table/106/1/{5-10} and /Table/106/{1/10-2}. -// They're contiguous; put together they wholly encompass the parent span. +// With table ID as 106, the parent index span is /Table/106/{1-2}. The child +// spans are /Table/106/1{-/5}, /Table/106/1/{5-10} and /Table/106/{1/10-2}. +// They're contiguous; put together they wholly encompass the parent span. // -// - Non-contiguous links, by contrast, are when child spans are neither -// contiguous with respect to one another, nor do they start and end at -// the parent span's boundaries. For a table with a secondary index: +// - Non-contiguous links, by contrast, are when child spans are neither +// contiguous with respect to one another, nor do they start and end at +// the parent span's boundaries. 
For a table with a secondary index: // -// CREATE TABLE db.t(i INT PRIMARY KEY, j INT); -// CREATE INDEX idx ON db.t (j); -// DROP INDEX db.t@idx; -// CREATE INDEX idx ON db.t (j); +// CREATE TABLE db.t(i INT PRIMARY KEY, j INT); +// CREATE INDEX idx ON db.t (j); +// DROP INDEX db.t@idx; +// CREATE INDEX idx ON db.t (j); // -// With table ID as 106, the parent table span is /Table/10{6-7}. The child -// spans are /Table/106/{1-2} and /Table/106/{3-4}. Compared to the parent -// span, we're missing /Table/106{-/1}, /Table/106/{2-3}, /Table/10{6/4-7}. +// With table ID as 106, the parent table span is /Table/10{6-7}. The child +// spans are /Table/106/{1-2} and /Table/106/{3-4}. Compared to the parent +// span, we're missing /Table/106{-/1}, /Table/106/{2-3}, /Table/10{6/4-7}. // // For N children: -// - For a contiguous link, the number of splits equals the number of child -// elements (i.e. N). -// - For a non-contiguous link, the number of splits equals N + 1 + N. For N -// children, there are N - 1 gaps. There are also 2 gaps at the start and end -// of the parent span. Summing that with the N children span themselves, we -// get to the formula above. This assumes that the N child elements aren't -// further subdivided, if they are (we can compute it recursively), the -// formula becomes N + 1 + Σ(grand child spans). -// +// - For a contiguous link, the number of splits equals the number of child +// elements (i.e. N). +// - For a non-contiguous link, the number of splits equals N + 1 + N. For N +// children, there are N - 1 gaps. There are also 2 gaps at the start and end +// of the parent span. Summing that with the N children span themselves, we +// get to the formula above. This assumes that the N child elements aren't +// further subdivided, if they are (we can compute it recursively), the +// formula becomes N + 1 + Σ(grand child spans). // // It's possible to compute split points more precisely if we did decode keys. // We could, for example, recognize that partition-by-list values are adjacent, @@ -104,8 +103,9 @@ func New(codec keys.SQLCodec, knobs *spanconfig.TestingKnobs) *Splitter { // more than fine for our current usages. // // [1]: Today it's possible to GC type descriptors before GC-ing table -// descriptors that refer to them. This interface is used near by this GC -// activity, so type information is not always available. +// +// descriptors that refer to them. This interface is used near by this GC +// activity, so type information is not always available. func (s *Splitter) Splits(ctx context.Context, table catalog.TableDescriptor) (int, error) { if isNil(table) { return 0, nil // nothing to do diff --git a/pkg/spanconfig/spanconfigstore/entry_interval_btree.go b/pkg/spanconfig/spanconfigstore/entry_interval_btree.go index 111824393799..79df86f0fed4 100644 --- a/pkg/spanconfig/spanconfigstore/entry_interval_btree.go +++ b/pkg/spanconfig/spanconfigstore/entry_interval_btree.go @@ -32,17 +32,20 @@ const ( // cmp returns a value indicating the sort order relationship between // a and b. The comparison is performed lexicographically on -// (a.Key(), a.EndKey(), a.ID()) +// +// (a.Key(), a.EndKey(), a.ID()) +// // and -// (b.Key(), b.EndKey(), b.ID()) +// +// (b.Key(), b.EndKey(), b.ID()) +// // tuples. 
// // Given c = cmp(a, b): // -// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) -// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) -// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) -// +// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) +// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) +// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) func cmp(a, b *entry) int { c := bytes.Compare(a.Key(), b.Key()) if c != 0 { @@ -325,21 +328,21 @@ func (n *node) find(item *entry) (index int, found bool) { // // Before: // -// +-----------+ -// | x y z | -// +--/-/-\-\--+ +// +-----------+ +// | x y z | +// +--/-/-\-\--+ // // After: // -// +-----------+ -// | y | -// +----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// +----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | z | // +-----------+ +-----------+ -// func (n *node) split(i int) (*entry, *node) { out := n.items[i] var next *node @@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *entry { // is to minimize the number of key comparisons performed in total. The // algorithm operates based on the following two invariants maintained by // augmented interval btree: -// 1. all items are sorted in the btree based on their start key. -// 2. all btree nodes maintain the upper bound end key of all items -// in their subtree. +// 1. all items are sorted in the btree based on their start key. +// 2. all btree nodes maintain the upper bound end key of all items +// in their subtree. // // The scan algorithm starts in "unconstrained minimum" and "unconstrained // maximum" states. To enter a "constrained minimum" state, the scan must reach @@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *entry { // // The scan algorithm works like a standard btree forward scan with the // following augmentations: -// 1. before tranversing the tree, the scan performs a binary search on the -// root node's items to determine a "soft" lower-bound constraint position -// and a "hard" upper-bound constraint position in the root's children. -// 2. when tranversing into a child node in the lower or upper bound constraint -// position, the constraint is refined by searching the child's items. -// 3. the initial traversal down the tree follows the left-most children -// whose upper bound end keys are equal to or greater than the start key -// of the search range. The children followed will be equal to or less -// than the soft lower bound constraint. -// 4. once the initial tranversal completes and the scan is in the left-most -// btree node whose upper bound overlaps the search range, key comparisons -// must be performed with each item in the tree. This is necessary because -// any of these items may have end keys that cause them to overlap with the -// search range. -// 5. once the scan reaches the lower bound constraint position (the first item -// with a start key equal to or greater than the search range's start key), -// it can begin scaning without performing key comparisons. This is allowed -// because all items from this point forward will have end keys that are -// greater than the search range's start key. -// 6. once the scan reaches the upper bound constraint position, it terminates. -// It does so because the item at this position is the first item with a -// start key larger than the search range's end key. +// 1. 
before tranversing the tree, the scan performs a binary search on the +// root node's items to determine a "soft" lower-bound constraint position +// and a "hard" upper-bound constraint position in the root's children. +// 2. when tranversing into a child node in the lower or upper bound constraint +// position, the constraint is refined by searching the child's items. +// 3. the initial traversal down the tree follows the left-most children +// whose upper bound end keys are equal to or greater than the start key +// of the search range. The children followed will be equal to or less +// than the soft lower bound constraint. +// 4. once the initial tranversal completes and the scan is in the left-most +// btree node whose upper bound overlaps the search range, key comparisons +// must be performed with each item in the tree. This is necessary because +// any of these items may have end keys that cause them to overlap with the +// search range. +// 5. once the scan reaches the lower bound constraint position (the first item +// with a start key equal to or greater than the search range's start key), +// it can begin scaning without performing key comparisons. This is allowed +// because all items from this point forward will have end keys that are +// greater than the search range's start key. +// 6. once the scan reaches the upper bound constraint position, it terminates. +// It does so because the item at this position is the first item with a +// start key larger than the search range's end key. type overlapScan struct { // The "soft" lower-bound constraint. constrMinN *node diff --git a/pkg/spanconfig/spanconfigstore/span_store.go b/pkg/spanconfig/spanconfigstore/span_store.go index 13ef1a6a7a9d..fe5b6eb3e0a2 100644 --- a/pkg/spanconfig/spanconfigstore/span_store.go +++ b/pkg/spanconfig/spanconfigstore/span_store.go @@ -286,21 +286,21 @@ func (s *spanConfigStore) apply( // overlapping spans in their entirety and re-adding the non-overlapping // segments. Pseudo-code: // -// for entry in store.overlapping(update.span): -// union, intersection = union(update.span, entry), intersection(update.span, entry) -// pre = span{union.start_key, intersection.start_key} -// post = span{intersection.end_key, union.end_key} +// for entry in store.overlapping(update.span): +// union, intersection = union(update.span, entry), intersection(update.span, entry) +// pre = span{union.start_key, intersection.start_key} +// post = span{intersection.end_key, union.end_key} // -// delete {span=entry.span, conf=entry.conf} -// if entry.contains(update.span.start_key): -// # First entry overlapping with update. -// add {span=pre, conf=entry.conf} if non-empty -// if entry.contains(update.span.end_key): -// # Last entry overlapping with update. -// add {span=post, conf=entry.conf} if non-empty +// delete {span=entry.span, conf=entry.conf} +// if entry.contains(update.span.start_key): +// # First entry overlapping with update. +// add {span=pre, conf=entry.conf} if non-empty +// if entry.contains(update.span.end_key): +// # Last entry overlapping with update. +// add {span=post, conf=entry.conf} if non-empty // -// if adding: -// add {span=update.span, conf=update.conf} # add ourselves +// if adding: +// add {span=update.span, conf=update.conf} # add ourselves // // When extending to a set of updates, things are more involved (but only // slightly!). 
Let's assume that the updates are non-overlapping and sorted @@ -310,9 +310,9 @@ func (s *spanConfigStore) apply( // processing one update at a time in sorted order, we want to only re-add the // gap between the consecutive updates. // -// keyspace a b c d e f g h i j -// existing state [--------X--------) -// updates [--A--) [--B--) +// keyspace a b c d e f g h i j +// existing state [--------X--------) +// updates [--A--) [--B--) // // When processing [a,c):A, after deleting [b,h):X, it would be incorrect to // re-add [c,h):X since we're also looking to apply [g,i):B. Instead of @@ -326,9 +326,9 @@ func (s *spanConfigStore) apply( // want to re-add [c,d):X and carry forward [f,h):X to the update after (i.e. // [g,i):C)). // -// keyspace a b c d e f g h i j -// existing state [--------X--------) -// updates [--A--) [--B--) [--C--) +// keyspace a b c d e f g h i j +// existing state [--------X--------) +// updates [--A--) [--B--) [--C--) // // One final note: we're iterating through the updates without actually applying // any mutations. Going back to our first example, when processing [g,i):B, @@ -339,38 +339,37 @@ func (s *spanConfigStore) apply( // we need to exclude any that overlap with the segment that was carried over. // Pseudo-code: // -// carry-over = -// for update in updates: -// carried-over, carry-over = carry-over, -// if update.overlap(carried-over): -// # Fill in the gap between consecutive updates. -// add {span=span{carried-over.start_key, update.start_key}, conf=carried-over.conf} -// # Consider the trailing span after update; carry it forward if non-empty. -// carry-over = {span=span{update.end_key, carried-over.end_key}, conf=carried-over.conf} -// else: -// add {span=carried-over.span, conf=carried-over.conf} if non-empty +// carry-over = +// for update in updates: +// carried-over, carry-over = carry-over, +// if update.overlap(carried-over): +// # Fill in the gap between consecutive updates. +// add {span=span{carried-over.start_key, update.start_key}, conf=carried-over.conf} +// # Consider the trailing span after update; carry it forward if non-empty. +// carry-over = {span=span{update.end_key, carried-over.end_key}, conf=carried-over.conf} +// else: +// add {span=carried-over.span, conf=carried-over.conf} if non-empty // -// for entry in store.overlapping(update.span): -// if entry.overlap(carried-over): -// continue # already processed +// for entry in store.overlapping(update.span): +// if entry.overlap(carried-over): +// continue # already processed // -// union, intersection = union(update.span, entry), intersection(update.span, entry) -// pre = span{union.start_key, intersection.start_key} -// post = span{intersection.end_key, union.end_key} +// union, intersection = union(update.span, entry), intersection(update.span, entry) +// pre = span{union.start_key, intersection.start_key} +// post = span{intersection.end_key, union.end_key} // -// delete {span=entry.span, conf=entry.conf} -// if entry.contains(update.span.start_key): -// # First entry overlapping with update. -// add {span=pre, conf=entry.conf} if non-empty -// if entry.contains(update.span.end_key): -// # Last entry overlapping with update. -// carry-over = {span=post, conf=entry.conf} +// delete {span=entry.span, conf=entry.conf} +// if entry.contains(update.span.start_key): +// # First entry overlapping with update. +// add {span=pre, conf=entry.conf} if non-empty +// if entry.contains(update.span.end_key): +// # Last entry overlapping with update. 
+// carry-over = {span=post, conf=entry.conf} // -// if adding: -// add {span=update.span, conf=update.conf} # add ourselves -// -// add {span=carry-over.span, conf=carry-over.conf} if non-empty +// if adding: +// add {span=update.span, conf=update.conf} # add ourselves // +// add {span=carry-over.span, conf=carry-over.conf} if non-empty func (s *spanConfigStore) accumulateOpsFor( ctx context.Context, dryrun bool, updates []spanconfig.Update, ) (toDelete, toAdd []entry, _ error) { diff --git a/pkg/spanconfig/spanconfigstore/store_test.go b/pkg/spanconfig/spanconfigstore/store_test.go index 1949eb71cb8f..6ba3729f1bea 100644 --- a/pkg/spanconfig/spanconfigstore/store_test.go +++ b/pkg/spanconfig/spanconfigstore/store_test.go @@ -65,44 +65,44 @@ func (s *spanConfigStore) TestingSplitKeys(tb testing.TB, start, end roachpb.RKe // TestDataDriven runs datadriven tests against the Store interface. // The syntax is as follows: // -// apply -// delete [a,c) -// set [c,h):X -// set {entire-keyspace}:X -// set {source=1,target=1}:Y -// ---- -// deleted [b,d) -// deleted [e,g) -// added [c,h):X -// added {entire-keyspace}:X -// added {source=1,target=1}:Y +// apply +// delete [a,c) +// set [c,h):X +// set {entire-keyspace}:X +// set {source=1,target=1}:Y +// ---- +// deleted [b,d) +// deleted [e,g) +// added [c,h):X +// added {entire-keyspace}:X +// added {source=1,target=1}:Y // -// get key=b -// ---- -// conf=A # or conf=FALLBACK if the key is not present +// get key=b +// ---- +// conf=A # or conf=FALLBACK if the key is not present // -// needs-split span=[b,h) -// ---- -// true +// needs-split span=[b,h) +// ---- +// true // -// compute-split span=[b,h) -// ---- -// key=c +// compute-split span=[b,h) +// ---- +// key=c // -// split-keys span=[b,h) -// ---- -// key=c +// split-keys span=[b,h) +// ---- +// key=c // -// overlapping span=[b,h) -// ---- -// [b,d):A -// [d,f):B -// [f,h):A +// overlapping span=[b,h) +// ---- +// [b,d):A +// [d,f):B +// [f,h):A // -// interned -// ---- -// A (refs = 2) -// B (refs = 1) +// interned +// ---- +// A (refs = 2) +// B (refs = 1) // // Text of the form [a,b), {entire-keyspace}, {source=1,target=20}, and [a,b):C // correspond to targets {spans, system targets} and span config records; see diff --git a/pkg/spanconfig/spanconfigtestutils/utils.go b/pkg/spanconfig/spanconfigtestutils/utils.go index cf415762bd87..3d4fc0d8114c 100644 --- a/pkg/spanconfig/spanconfigtestutils/utils.go +++ b/pkg/spanconfig/spanconfigtestutils/utils.go @@ -144,13 +144,12 @@ func ParseSpanConfigRecord(t testing.TB, conf string) spanconfig.Record { // kvaccessor-get arguments into the relevant spans. The input is of the // following form: // -// span [a,e) -// span [a,b) -// span [b,c) -// system-target {source=1,target=1} -// system-target {source=20,target=20} -// system-target {source=1,target=20} -// +// span [a,e) +// span [a,b) +// span [b,c) +// system-target {source=1,target=1} +// system-target {source=20,target=20} +// system-target {source=1,target=20} func ParseKVAccessorGetArguments(t testing.TB, input string) []spanconfig.Target { var targets []spanconfig.Target for _, line := range strings.Split(input, "\n") { @@ -183,14 +182,13 @@ func ParseKVAccessorGetArguments(t testing.TB, input string) []spanconfig.Target // kvaccessor-update arguments into the relevant targets and records. 
The input // is of the following form: // -// delete [c,e) -// upsert [c,d):C -// upsert [d,e):D -// delete {source=1,target=1} -// delete {source=1,target=20} -// upsert {source=1,target=1}:A -// delete {source=1,target=20}:D -// +// delete [c,e) +// upsert [c,d):C +// upsert [d,e):D +// delete {source=1,target=1} +// delete {source=1,target=20} +// upsert {source=1,target=1}:A +// delete {source=1,target=20}:D func ParseKVAccessorUpdateArguments( t testing.TB, input string, ) ([]spanconfig.Target, []spanconfig.Record) { @@ -221,10 +219,9 @@ func ParseKVAccessorUpdateArguments( // ParseStoreApplyArguments is a helper function that parses datadriven // store update arguments. The input is of the following form: // -// delete [c,e) -// set [c,d):C -// set [d,e):D -// +// delete [c,e) +// set [c,d):C +// set [d,e):D func ParseStoreApplyArguments(t testing.TB, input string) (updates []spanconfig.Update) { for _, line := range strings.Split(input, "\n") { line = strings.TrimSpace(line) diff --git a/pkg/sql/alter_primary_key.go b/pkg/sql/alter_primary_key.go index 5f5f1ca1ecdf..4353362f5800 100644 --- a/pkg/sql/alter_primary_key.go +++ b/pkg/sql/alter_primary_key.go @@ -645,13 +645,13 @@ func (p *planner) shouldCreateIndexes( // We only recreate the old primary key of the table as a unique secondary // index if: -// * The table has a primary key (no DROP PRIMARY KEY statements have -// been executed). -// * The primary key is not the default rowid primary key. -// * The new primary key isn't the same set of columns and directions -// other than hash sharding. -// * There is no partitioning change. -// * There is no existing secondary index on the old primary key columns. +// - The table has a primary key (no DROP PRIMARY KEY statements have +// been executed). +// - The primary key is not the default rowid primary key. +// - The new primary key isn't the same set of columns and directions +// other than hash sharding. +// - There is no partitioning change. +// - There is no existing secondary index on the old primary key columns. func shouldCopyPrimaryKey( desc *tabledesc.Mutable, newPK *descpb.IndexDescriptor, diff --git a/pkg/sql/alter_role.go b/pkg/sql/alter_role.go index 6192bd481cf4..3ee44e23330d 100644 --- a/pkg/sql/alter_role.go +++ b/pkg/sql/alter_role.go @@ -554,13 +554,17 @@ func (n *alterRoleSetNode) getRoleName( // returns a newSettings list with any occurrence of varName removed. // // E.g. Suppose there is an existing row in `system.database_role_settings`: -// (24, max, {timezone=America/New_York, use_declarative_schema_changer=off, statement_timeout=10s}) +// +// (24, max, {timezone=America/New_York, use_declarative_schema_changer=off, statement_timeout=10s}) +// // and -// n.varName = 'use_declarative_schema_changer', +// +// n.varName = 'use_declarative_schema_changer', +// // then the return of this function will be -// 1. oldSettings = {timezone=America/New_York, use_declarative_schema_changer=off, statement_timeout=10s} -// 2. newSettings = {timezone=America/New_York, statement_timeout=10s} -// 3. err = nil +// 1. oldSettings = {timezone=America/New_York, use_declarative_schema_changer=off, statement_timeout=10s} +// 2. newSettings = {timezone=America/New_York, statement_timeout=10s} +// 3. 
err = nil func (n *alterRoleSetNode) makeNewSettings( params runParams, opName string, roleName username.SQLUsername, ) (oldSettings []string, newSettings []string, err error) { diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index bd5e02e59906..c66621f3d35b 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -69,8 +69,9 @@ type alterTableNode struct { // AlterTable applies a schema change on a table. // Privileges: CREATE on table. -// notes: postgres requires CREATE on the table. -// mysql requires ALTER, CREATE, INSERT on the table. +// +// notes: postgres requires CREATE on the table. +// mysql requires ALTER, CREATE, INSERT on the table. func (p *planner) AlterTable(ctx context.Context, n *tree.AlterTable) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 2b63ee87932b..c4bb02960769 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -1990,7 +1990,6 @@ func countIndexRowsAndMaybeCheckUniqueness( // backfillIndexes fills the missing columns in the indexes of the // leased tables. // -// // If temporaryIndexes is non-empty, we assume that we are using the // MVCC-compatible backfilling process. This mutation has already been // checked to ensure all newly added indexes are using one type of @@ -2010,58 +2009,72 @@ func countIndexRowsAndMaybeCheckUniqueness( // Finally, the new index is brought into the DELETE_AND_WRITE_ONLY // state for validation. // -// ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ -// │ │ │ │ │ │ -// │ PrimaryIndex │ │ NewIndex │ │ TempIndex │ +// ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +// │ │ │ │ │ │ +// │ PrimaryIndex │ │ NewIndex │ │ TempIndex │ +// // t0 │ (PUBLIC) │ │ (BACKFILLING) │ │ (DELETE_ONLY) │ -// │ │ │ │ │ │ -// └─────────────────┘ └─────────────────┘ └────────┬────────┘ -// │ -// ┌────────▼────────┐ -// │ │ -// │ TempIndex │ +// +// │ │ │ │ │ │ +// └─────────────────┘ └─────────────────┘ └────────┬────────┘ +// │ +// ┌────────▼────────┐ +// │ │ +// │ TempIndex │ +// // t1 │(DELETE_AND_WRITE) │ -// │ │ │ -// └────────┬────────┘ │ -// │ │ -// ┌─────────────────┐ ┌─────────────────┐ ┌────────▼────────┐ │ TempIndex receiving writes -// │ │ │ │ │ │ │ -// │ PrimaryIndex ├────────►│ NewIndex │ │ TempIndex │ │ +// +// │ │ │ +// └────────┬────────┘ │ +// │ │ +// ┌─────────────────┐ ┌─────────────────┐ ┌────────▼────────┐ │ TempIndex receiving writes +// │ │ │ │ │ │ │ +// │ PrimaryIndex ├────────►│ NewIndex │ │ TempIndex │ │ +// // t2 │ (PUBLIC) │ Backfill│ (BACKFILLING) │ │(DELETE_AND_WRITE│ │ -// │ │ │ │ │ │ │ -// └─────────────────┘ └────────┬────────┘ └─────────────────┘ │ -// │ │ -// ┌────────▼────────┐ │ -// │ │ │ -// │ NewIndex │ │ +// +// │ │ │ │ │ │ │ +// └─────────────────┘ └────────┬────────┘ └─────────────────┘ │ +// │ │ +// ┌────────▼────────┐ │ +// │ │ │ +// │ NewIndex │ │ +// // t3 │ (DELETE_ONLY) │ │ -// │ │ │ -// └────────┬────────┘ │ -// │ │ -// ┌────────▼────────┐ │ -// │ │ │ -// │ NewIndex │ │ │ -// │ (MERGING) │ │ │ +// +// │ │ │ +// └────────┬────────┘ │ +// │ │ +// ┌────────▼────────┐ │ +// │ │ │ +// │ NewIndex │ │ │ +// │ (MERGING) │ │ │ +// // t4 │ │ │ │ NewIndex receiving writes -// └─────────────────┘ │ │ -// │ │ -// ┌─────────────────┐ ┌─────────────────┐ │ │ -// │ │ │ │ │ │ -// │ NewIndex │◄────────────┤ TempIndex │ │ │ +// +// └─────────────────┘ │ │ +// │ │ +// ┌─────────────────┐ ┌─────────────────┐ │ │ +// │ │ │ │ │ │ +// │ NewIndex │◄────────────┤ TempIndex │ │ │ +// // t5 │ (MERGING) │ 
BatchMerge │(DELETE_AND_WRITE│ │ │ -// │ │ │ │ │ │ -// └────────┬────────┘ └───────┬─────────┘ │ │ -// │ │ │ │ -// ┌────────▼────────┐ ┌───────▼─────────┐ │ │ -// │ │ │ │ │ │ -// │ NewIndex │ │ TempIndex │ │ +// +// │ │ │ │ │ │ +// └────────┬────────┘ └───────┬─────────┘ │ │ +// │ │ │ │ +// ┌────────▼────────┐ ┌───────▼─────────┐ │ │ +// │ │ │ │ │ │ +// │ NewIndex │ │ TempIndex │ │ +// // t6 │(DELETE_AND_WRITE) │ (DELETE_ONLY) │ │ -// │ │ │ │ │ -// └───────┬─────────┘ └───────┬─────────┘ │ -// │ │ -// │ │ -// ▼ ▼ -// [validate and make public] [ dropped ] +// +// │ │ │ │ │ +// └───────┬─────────┘ └───────┬─────────┘ │ +// │ │ +// │ │ +// ▼ ▼ +// [validate and make public] [ dropped ] // // This operates over multiple goroutines concurrently and is thus not // able to reuse the original kv.Txn safely. diff --git a/pkg/sql/catalog/catalogkeys/keys.go b/pkg/sql/catalog/catalogkeys/keys.go index 4cf863859671..43de53126393 100644 --- a/pkg/sql/catalog/catalogkeys/keys.go +++ b/pkg/sql/catalog/catalogkeys/keys.go @@ -77,7 +77,7 @@ func IndexKeyValDirs(index catalog.Index) []encoding.Direction { // PrettyKey pretty-prints the specified key, skipping over the first `skip` // fields. The pretty printed key looks like: // -// /Table///... +// /Table///... // // We always strip off the /Table prefix and then `skip` more fields. Note that // this assumes that the fields themselves do not contain '/', but that is diff --git a/pkg/sql/catalog/catformat/index.go b/pkg/sql/catalog/catformat/index.go index af83096ca9d4..22e1c65856bd 100644 --- a/pkg/sql/catalog/catformat/index.go +++ b/pkg/sql/catalog/catformat/index.go @@ -45,12 +45,11 @@ const ( // If tableName is anonymous then no table name is included in the formatted // string. For example: // -// INDEX i (a) WHERE b > 0 +// INDEX i (a) WHERE b > 0 // // If tableName is not anonymous, then "ON" and the name is included: // -// INDEX i ON t (a) WHERE b > 0 -// +// INDEX i ON t (a) WHERE b > 0 func IndexForDisplay( ctx context.Context, table catalog.TableDescriptor, diff --git a/pkg/sql/catalog/catpb/default_privilege.go b/pkg/sql/catalog/catpb/default_privilege.go index 1748c9df93ce..98410f3ec8f9 100644 --- a/pkg/sql/catalog/catpb/default_privilege.go +++ b/pkg/sql/catalog/catpb/default_privilege.go @@ -21,8 +21,9 @@ import ( // DefaultPrivilegesRole represents the creator role that the default privileges // are being altered for. // Either: -// role should be populated -// forAllRoles should be true. +// +// role should be populated +// forAllRoles should be true. type DefaultPrivilegesRole struct { Role username.SQLUsername ForAllRoles bool diff --git a/pkg/sql/catalog/catpb/privilege.go b/pkg/sql/catalog/catpb/privilege.go index e13cb62043db..b933b66dc00e 100644 --- a/pkg/sql/catalog/catpb/privilege.go +++ b/pkg/sql/catalog/catpb/privilege.go @@ -24,6 +24,7 @@ import ( ) // PrivilegeDescVersion is a custom type for PrivilegeDescriptor versions. +// //go:generate stringer -type=PrivilegeDescVersion type PrivilegeDescVersion uint32 diff --git a/pkg/sql/catalog/colinfo/ordering.go b/pkg/sql/catalog/colinfo/ordering.go index cfac6cf73937..830b135acb3f 100644 --- a/pkg/sql/catalog/colinfo/ordering.go +++ b/pkg/sql/catalog/colinfo/ordering.go @@ -26,7 +26,9 @@ type ColumnOrderInfo struct { } // ColumnOrdering is used to describe a desired column ordering. 
For example, -// []ColumnOrderInfo{ {3, encoding.Descending}, {1, encoding.Ascending} } +// +// []ColumnOrderInfo{ {3, encoding.Descending}, {1, encoding.Ascending} } +// // represents an ordering first by column 3 (descending), then by column 1 (ascending). type ColumnOrdering []ColumnOrderInfo @@ -67,9 +69,9 @@ func (ordering ColumnOrdering) String(columns ResultColumns) string { var NoOrdering ColumnOrdering // CompareDatums compares two datum rows according to a column ordering. Returns: -// - 0 if lhs and rhs are equal on the ordering columns; -// - less than 0 if lhs comes first; -// - greater than 0 if rhs comes first. +// - 0 if lhs and rhs are equal on the ordering columns; +// - less than 0 if lhs comes first; +// - greater than 0 if rhs comes first. func CompareDatums(ordering ColumnOrdering, evalCtx *eval.Context, lhs, rhs tree.Datums) int { for _, c := range ordering { // TODO(pmattis): This is assuming that the datum types are compatible. I'm diff --git a/pkg/sql/catalog/descpb/structured.go b/pkg/sql/catalog/descpb/structured.go index 76a1b5051f2f..adf39be897ba 100644 --- a/pkg/sql/catalog/descpb/structured.go +++ b/pkg/sql/catalog/descpb/structured.go @@ -48,6 +48,7 @@ func (ids IDs) Contains(targetID ID) bool { // FormatVersion is a custom type for TableDescriptor versions of the sql to // key:value mapping. +// //go:generate stringer -type=FormatVersion type FormatVersion uint32 diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go index 820abaa89600..75615e2e9724 100644 --- a/pkg/sql/catalog/descs/collection_test.go +++ b/pkg/sql/catalog/descs/collection_test.go @@ -621,14 +621,14 @@ func TestCollectionPreservesPostDeserializationChanges(t *testing.T) { // since it reads all descriptors from storage, which can be huge. // // The testing strategy is to -// 1. Create tables that are very large into the database (so that when we read them -// into memory later with Collection, a lot of memory will be allocated and used). -// 2. Hook up a monitor with infinite budget to this Collection and invoke method -// so that this Collection reads all the descriptors into memory. With an unlimited -// monitor, this should succeed without error. -// 3. Change the monitor budget to something small. Repeat step 2 and expect an error -// being thrown out when reading all those descriptors into memory to validate the -// memory monitor indeed kicked in and had an effect. +// 1. Create tables that are very large into the database (so that when we read them +// into memory later with Collection, a lot of memory will be allocated and used). +// 2. Hook up a monitor with infinite budget to this Collection and invoke method +// so that this Collection reads all the descriptors into memory. With an unlimited +// monitor, this should succeed without error. +// 3. Change the monitor budget to something small. Repeat step 2 and expect an error +// being thrown out when reading all those descriptors into memory to validate the +// memory monitor indeed kicked in and had an effect. 
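The colinfo.ColumnOrdering / CompareDatums contract documented in the ordering.go hunk above is easy to get backwards, so here is a minimal, self-contained Go sketch of the same comparison rule. It uses plain int rows and locally defined direction/ordering types as stand-ins for tree.Datums and the colinfo types; it is not the CockroachDB implementation.

	package main

	import "fmt"

	// direction is a local stand-in for encoding.Direction.
	type direction int

	const (
		ascending direction = iota
		descending
	)

	// columnOrderInfo and columnOrdering are simplified stand-ins for
	// colinfo.ColumnOrderInfo / colinfo.ColumnOrdering.
	type columnOrderInfo struct {
		colIdx int
		dir    direction
	}

	type columnOrdering []columnOrderInfo

	// compareRows returns 0 if lhs and rhs are equal on the ordering columns, a
	// negative value if lhs sorts first, and a positive value if rhs sorts
	// first, mirroring the contract documented for CompareDatums.
	func compareRows(ordering columnOrdering, lhs, rhs []int) int {
		for _, c := range ordering {
			cmp := 0
			switch {
			case lhs[c.colIdx] < rhs[c.colIdx]:
				cmp = -1
			case lhs[c.colIdx] > rhs[c.colIdx]:
				cmp = 1
			}
			if cmp == 0 {
				continue
			}
			if c.dir == descending {
				cmp = -cmp
			}
			return cmp
		}
		return 0
	}

	func main() {
		// Order by column 1 descending, then by column 0 ascending.
		ord := columnOrdering{{colIdx: 1, dir: descending}, {colIdx: 0, dir: ascending}}
		fmt.Println(compareRows(ord, []int{1, 5}, []int{2, 3})) // -1: lhs sorts first (5 > 3, descending)
	}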
func TestCollectionProperlyUsesMemoryMonitoring(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go b/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go index 4fb235edc079..3a7748368f24 100644 --- a/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go +++ b/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go @@ -39,9 +39,10 @@ import ( // // The commands are exec and query which take statements as input. // The args are: -// db: db name -// search_path: csv of strings, optional -// error: expected error, optional +// +// db: db name +// search_path: csv of strings, optional +// error: expected error, optional func TestTxnWithExecutorDataDriven(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/catalog/internal/catkv/stored_catalog.go b/pkg/sql/catalog/internal/catkv/stored_catalog.go index 1586052dd984..af5886aa46f8 100644 --- a/pkg/sql/catalog/internal/catkv/stored_catalog.go +++ b/pkg/sql/catalog/internal/catkv/stored_catalog.go @@ -251,11 +251,10 @@ func (sc *StoredCatalog) IsIDKnownToNotExist(id descpb.ID) bool { // Descriptors are physically keyed by ID, so we need to resolve their ID by // querying the system.namespace table first, which is what this method does. // We can avoid having to do this in some special cases: -// - When the descriptor name and ID are hard-coded. This is the case for the -// system database and for the tables in it. -// - When we're looking up a schema for which we already have the descriptor -// of the parent database. The schema ID can be looked up in it. -// +// - When the descriptor name and ID are hard-coded. This is the case for the +// system database and for the tables in it. +// - When we're looking up a schema for which we already have the descriptor +// of the parent database. The schema ID can be looked up in it. func (sc *StoredCatalog) LookupDescriptorID( ctx context.Context, txn *kv.Txn, parentID, parentSchemaID descpb.ID, name string, ) (descpb.ID, error) { @@ -278,11 +277,10 @@ func (sc *StoredCatalog) LookupDescriptorID( // GetByName reads a descriptor from the storage layer by name. // // This is a three-step process: -// 1. resolve the descriptor's ID using the name information, -// 2. actually read the descriptor from storage, -// 3. check that the name in the descriptor is the one we expect; meaning that -// there is no RENAME underway for instance. -// +// 1. resolve the descriptor's ID using the name information, +// 2. actually read the descriptor from storage, +// 3. check that the name in the descriptor is the one we expect; meaning that +// there is no RENAME underway for instance. func (sc *StoredCatalog) GetByName( ctx context.Context, txn *kv.Txn, parentID descpb.ID, parentSchemaID descpb.ID, name string, ) (catalog.Descriptor, error) { diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index b55a25842991..f77879119920 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -215,7 +215,9 @@ type historicalDescriptor struct { // // In the following scenario v4 is our oldest active lease // [v1@t1 ][v2@t3 ][v3@t5 ][v4@t7 -// [start end] +// +// [start end] +// // getDescriptorsFromStoreForInterval(..., start, end) will get back: // [v3, v2] (reverse order) // @@ -331,10 +333,10 @@ func getDescriptorsFromStoreForInterval( // descriptor version we are interested in, resulting at most 2 KV calls. 
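As a rough illustration of the interval lookup sketched for getDescriptorsFromStoreForInterval in the lease.go hunk above, the following standalone Go sketch returns, newest first, the versions whose visibility window overlaps [start, end). Timestamps are simplified to int64 and the versionWindow type is hypothetical; this is not the lease.Manager code.

	package main

	import "fmt"

	// versionWindow is an illustrative stand-in for a historical descriptor
	// version that is visible over [modTime, nextMod).
	type versionWindow struct {
		version          int
		modTime, nextMod int64
	}

	// versionsForInterval returns, newest first, the versions whose visibility
	// window overlaps [start, end). versions must be sorted by modTime ascending.
	func versionsForInterval(versions []versionWindow, start, end int64) []versionWindow {
		var out []versionWindow
		for i := len(versions) - 1; i >= 0; i-- {
			v := versions[i]
			if v.modTime < end && start < v.nextMod {
				out = append(out, v)
			}
		}
		return out
	}

	func main() {
		// [v1@t1)[v2@t3)[v3@t5)[v4@t7...): an interval starting inside v2's
		// window and ending at v4's start returns v3 then v2.
		vs := []versionWindow{{1, 1, 3}, {2, 3, 5}, {3, 5, 7}, {4, 7, 1 << 40}}
		fmt.Println(versionsForInterval(vs, 4, 7)) // [{3 5 7} {2 3 5}], i.e. [v3, v2] in reverse order
	}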
// // TODO(vivek, james): Future work: -// 1. Translate multiple simultaneous calls to this method into a single call -// as is done for acquireNodeLease(). -// 2. Figure out a sane policy on when these descriptors should be purged. -// They are currently purged in PurgeOldVersions. +// 1. Translate multiple simultaneous calls to this method into a single call +// as is done for acquireNodeLease(). +// 2. Figure out a sane policy on when these descriptors should be purged. +// They are currently purged in PurgeOldVersions. func (m *Manager) readOlderVersionForTimestamp( ctx context.Context, id descpb.ID, timestamp hlc.Timestamp, ) ([]historicalDescriptor, error) { diff --git a/pkg/sql/catalog/lease/lease_internal_test.go b/pkg/sql/catalog/lease/lease_internal_test.go index ada1cf31e76b..aaa68a39a114 100644 --- a/pkg/sql/catalog/lease/lease_internal_test.go +++ b/pkg/sql/catalog/lease/lease_internal_test.go @@ -815,21 +815,21 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); // case through descriptorState.acquire(), the second through // descriptorState.acquireFreshestFromStore()) and a release of the lease that was // just acquired. Precisely: -// 1. Thread 1 calls either acquireFreshestFromStore() or acquire(). -// 2. Thread 1 releases the lock on descriptorState and starts acquisition of a lease -// from the store, blocking until it's finished. -// 3. Thread 2 calls acquire(). The lease has not been acquired yet, so it -// also enters the acquisition code path (calling DoChan). -// 4. Thread 2 proceeds to release the lock on descriptorState waiting for the -// in-flight acquisition. -// 4. The lease is acquired from the store and the waiting routines are -// unblocked. -// 5. Thread 2 unblocks first, and releases the new lease, for whatever reason. -// 5. Thread 1 wakes up. At this point, a naive implementation would use the -// newly acquired lease, which would be incorrect. The test checks that -// acquireFreshestFromStore() or acquire() notices, after re-acquiring the -// descriptorState lock, that the new lease has been released and acquires a new -// one. +// 1. Thread 1 calls either acquireFreshestFromStore() or acquire(). +// 2. Thread 1 releases the lock on descriptorState and starts acquisition of a lease +// from the store, blocking until it's finished. +// 3. Thread 2 calls acquire(). The lease has not been acquired yet, so it +// also enters the acquisition code path (calling DoChan). +// 4. Thread 2 proceeds to release the lock on descriptorState waiting for the +// in-flight acquisition. +// 4. The lease is acquired from the store and the waiting routines are +// unblocked. +// 5. Thread 2 unblocks first, and releases the new lease, for whatever reason. +// 5. Thread 1 wakes up. At this point, a naive implementation would use the +// newly acquired lease, which would be incorrect. The test checks that +// acquireFreshestFromStore() or acquire() notices, after re-acquiring the +// descriptorState lock, that the new lease has been released and acquires a new +// one. func TestLeaseAcquireAndReleaseConcurrently(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/catalog/multiregion/region_config.go b/pkg/sql/catalog/multiregion/region_config.go index 301fbfdc0550..fb52eba31dbf 100644 --- a/pkg/sql/catalog/multiregion/region_config.go +++ b/pkg/sql/catalog/multiregion/region_config.go @@ -387,10 +387,10 @@ func ValidateRegionConfig(config RegionConfig) error { } // ValidateSuperRegions validates that: -// 1. 
Region names are unique within a super region and are sorted. -// 2. All region within a super region map to a region on the RegionConfig. -// 3. Super region names are unique. -// 4. Each region can only belong to one super region. +// 1. Region names are unique within a super region and are sorted. +// 2. All region within a super region map to a region on the RegionConfig. +// 3. Super region names are unique. +// 4. Each region can only belong to one super region. func ValidateSuperRegions( superRegions []descpb.SuperRegion, survivalGoal descpb.SurvivalGoal, @@ -468,10 +468,10 @@ func ValidateSuperRegions( // ValidateZoneConfigExtensions validates that zone configuration extensions are // coherent with the rest of the multi-region configuration. It validates that: -// 1. All per-region extensions map to a region on the RegionConfig. -// 2. TODO(nvanbenschoten): add more zone config extension validation in the -// future to ensure zone config extensions do not subvert other portions -// of the multi-region config (e.g. like breaking REGION survivability). +// 1. All per-region extensions map to a region on the RegionConfig. +// 2. TODO(nvanbenschoten): add more zone config extension validation in the +// future to ensure zone config extensions do not subvert other portions +// of the multi-region config (e.g. like breaking REGION survivability). func ValidateZoneConfigExtensions( regionNames catpb.RegionNames, zoneCfgExtensions descpb.ZoneConfigExtensions, diff --git a/pkg/sql/catalog/nstree/map_test.go b/pkg/sql/catalog/nstree/map_test.go index e031064f2833..1344b53fbd8a 100644 --- a/pkg/sql/catalog/nstree/map_test.go +++ b/pkg/sql/catalog/nstree/map_test.go @@ -26,32 +26,27 @@ import ( // TestNameMapDataDriven tests the NameMap using a data-driven // exposition format. The tests support the following commands: // -// add [parent-id=...] [parent-schema-id=...] name=... id=... -// Calls the add method with an entry matching the spec. -// Prints the entry. +// add [parent-id=...] [parent-schema-id=...] name=... id=... +// Calls the add method with an entry matching the spec. +// Prints the entry. // -// remove id=... -// Calls the Remove method on the specified id. -// Prints whether it was removed. +// remove id=... +// Calls the Remove method on the specified id. +// Prints whether it was removed. // -// iterate-by-id [stop-after=] -// Iterates and prints the entries, ordered by ID. -// If stop-after is specified, after that many entries have been -// iterated, then an error will be returned. If there is an input, -// it will be used as the error message, otherwise, the error will -// be iterutil.StopIteration. +// iterate-by-id [stop-after=] +// Iterates and prints the entries, ordered by ID. +// If stop-after is specified, after that many entries have been +// iterated, then an error will be returned. If there is an input, +// it will be used as the error message, otherwise, the error will +// be iterutil.StopIteration. // -// clear -// Clears the tree. -// -// get-by-id id=... -// Gets the entry with the given ID and prints its entry. -// If no such entry exists, "not found" will be printed. -// -// get-by-name [parent-id=...] [parent-schema-id=...] name=... -// Gets the entry with the given name and prints its entry. -// If no such entry exists, "not found" will be printed. +// clear +// Clears the tree. // +// get-by-id id=... +// Gets the entry with the given ID and prints its entry. +// If no such entry exists, "not found" will be printed. 
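A hedged sketch of how a handler for the commands documented above (add, remove, get-by-id, ...) is typically wired up with the cockroachdb/datadriven package these data-driven tests use. The nameMap interface and its methods are hypothetical stand-ins for the real nstree.NameMap API; only datadriven.RunTest, TestData.Cmd, and TestData.ScanArgs are assumed from the library.

	package example

	import (
		"fmt"
		"testing"

		"github.com/cockroachdb/datadriven"
	)

	// nameMap is the minimal interface this sketch assumes; it is not the real
	// nstree API.
	type nameMap interface {
		Add(id int, name string)
		Remove(id int) bool
		GetByID(id int) (string, bool)
	}

	// runNameMapTest dispatches on the documented commands and returns the text
	// that is compared against the expected output below the "----" separator.
	func runNameMapTest(t *testing.T, path string, nm nameMap) {
		datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
			switch d.Cmd {
			case "add":
				var id int
				var name string
				d.ScanArgs(t, "id", &id)
				d.ScanArgs(t, "name", &name)
				nm.Add(id, name)
				return fmt.Sprintf("added %d -> %s", id, name)
			case "remove":
				var id int
				d.ScanArgs(t, "id", &id)
				return fmt.Sprintf("removed: %t", nm.Remove(id))
			case "get-by-id":
				var id int
				d.ScanArgs(t, "id", &id)
				if name, ok := nm.GetByID(id); ok {
					return name
				}
				return "not found"
			default:
				t.Fatalf("unknown command %q", d.Cmd)
				return ""
			}
		})
	}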
func TestNameMapDataDriven(t *testing.T) { datadriven.Walk(t, testutils.TestDataPath(t, "name_map"), func(t *testing.T, path string) { var nm NameMap diff --git a/pkg/sql/catalog/nstree/set_test.go b/pkg/sql/catalog/nstree/set_test.go index f078ad391c67..61cb2b67da32 100644 --- a/pkg/sql/catalog/nstree/set_test.go +++ b/pkg/sql/catalog/nstree/set_test.go @@ -24,17 +24,16 @@ import ( // TestSetDataDriven tests the Set using a data-driven // exposition format. The tests support the following commands: // -// add [parent-id=...] [parent-schema-id=...] name=... -// Calls the add method with an entry matching the spec. -// Prints the entry. +// add [parent-id=...] [parent-schema-id=...] name=... +// Calls the add method with an entry matching the spec. +// Prints the entry. // -// contains [parent-id=...] [parent-schema-id=...] name=... -// Calls the Remove method on the specified id. -// Prints whether it is contained removed. -// -// clear -// Clears the tree. +// contains [parent-id=...] [parent-schema-id=...] name=... +// Calls the Remove method on the specified id. +// Prints whether it is contained removed. // +// clear +// Clears the tree. func TestSetDataDriven(t *testing.T) { datadriven.Walk(t, testutils.TestDataPath(t, "set"), func(t *testing.T, path string) { var tr Set diff --git a/pkg/sql/catalog/resolver/resolver.go b/pkg/sql/catalog/resolver/resolver.go index c89479fb81be..de706f188635 100644 --- a/pkg/sql/catalog/resolver/resolver.go +++ b/pkg/sql/catalog/resolver/resolver.go @@ -295,7 +295,8 @@ func ResolveTargetObject( // ResolveSchemaNameByID resolves a schema's name based on db and schema id. // Instead, we have to rely on a scan of the kv table. // TODO (SQLSchema): The remaining uses of this should be plumbed through -// the desc.Collection's ResolveSchemaByID. +// +// the desc.Collection's ResolveSchemaByID. func ResolveSchemaNameByID( ctx context.Context, txn *kv.Txn, @@ -327,7 +328,7 @@ type SchemaEntryForDB struct { // GetForDatabase looks up and returns all available // schema ids to SchemaEntryForDB structures for a -//given database. +// given database. func GetForDatabase( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, db catalog.DatabaseDescriptor, ) (map[descpb.ID]SchemaEntryForDB, error) { diff --git a/pkg/sql/catalog/schemaexpr/check_constraint.go b/pkg/sql/catalog/schemaexpr/check_constraint.go index 62b3da2479d9..6f975c5150e3 100644 --- a/pkg/sql/catalog/schemaexpr/check_constraint.go +++ b/pkg/sql/catalog/schemaexpr/check_constraint.go @@ -154,9 +154,9 @@ func (b *CheckConstraintBuilder) generateUniqueName(expr tree.Expr) (string, err // // For example: // -// CHECK (a < 0) => check_a -// CHECK (a < 0 AND b = 'foo') => check_a_b -// CHECK (a < 0 AND b = 'foo' AND a < 10) => check_a_b_a +// CHECK (a < 0) => check_a +// CHECK (a < 0 AND b = 'foo') => check_a_b +// CHECK (a < 0 AND b = 'foo' AND a < 10) => check_a_b_a // // Note that the generated name is not guaranteed to be unique among the other // constraints of the table. diff --git a/pkg/sql/catalog/schemaexpr/column.go b/pkg/sql/catalog/schemaexpr/column.go index 7fa7061a93aa..60e971b74845 100644 --- a/pkg/sql/catalog/schemaexpr/column.go +++ b/pkg/sql/catalog/schemaexpr/column.go @@ -30,9 +30,9 @@ import ( // // For example: // -// tab.a > 0 AND db.tab.b = 'foo' -// => -// a > 0 AND b = 'foo' +// tab.a > 0 AND db.tab.b = 'foo' +// => +// a > 0 AND b = 'foo' // // This dequalification is necessary when CHECK constraints, computed columns, // or partial index predicates are created. 
If the table name was not stripped, diff --git a/pkg/sql/catalog/schemaexpr/computed_column_rewrites.go b/pkg/sql/catalog/schemaexpr/computed_column_rewrites.go index ae48e3a992ed..77532539289a 100644 --- a/pkg/sql/catalog/schemaexpr/computed_column_rewrites.go +++ b/pkg/sql/catalog/schemaexpr/computed_column_rewrites.go @@ -26,7 +26,7 @@ type ComputedColumnRewritesMap map[string]tree.Expr // ParseComputedColumnRewrites parses a string of the form: // -// (before expression) -> (after expression) [, (before expression) -> (after expression) ...] +// (before expression) -> (after expression) [, (before expression) -> (after expression) ...] // // into a ComputedColumnRewritesMap. // diff --git a/pkg/sql/catalog/schemaexpr/hash_sharded_compute_expr.go b/pkg/sql/catalog/schemaexpr/hash_sharded_compute_expr.go index 00bcd05e6b7d..88a8e2b0e748 100644 --- a/pkg/sql/catalog/schemaexpr/hash_sharded_compute_expr.go +++ b/pkg/sql/catalog/schemaexpr/hash_sharded_compute_expr.go @@ -16,8 +16,7 @@ import "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" // column based on the column names and the number of buckets. The expression will be // of the form: // -// mod(fnv32(crdb_internal.datums_to_bytes(...)),buckets) -// +// mod(fnv32(crdb_internal.datums_to_bytes(...)),buckets) func MakeHashShardComputeExpr(colNames []string, buckets int) *string { unresolvedFunc := func(funcName string) tree.ResolvableFunctionReference { return tree.ResolvableFunctionReference{ diff --git a/pkg/sql/catalog/schemaexpr/partial_index.go b/pkg/sql/catalog/schemaexpr/partial_index.go index 3689d571d448..5e6e19daf63d 100644 --- a/pkg/sql/catalog/schemaexpr/partial_index.go +++ b/pkg/sql/catalog/schemaexpr/partial_index.go @@ -39,7 +39,6 @@ import ( // functions. // - It does not reference a column which is in the process of being added // or removed. -// func ValidatePartialIndexPredicate( ctx context.Context, desc catalog.MutableTableDescriptor, diff --git a/pkg/sql/catalog/schemaexpr/unique_contraint.go b/pkg/sql/catalog/schemaexpr/unique_contraint.go index 4d707b2ac5e8..af27e7ab1900 100644 --- a/pkg/sql/catalog/schemaexpr/unique_contraint.go +++ b/pkg/sql/catalog/schemaexpr/unique_contraint.go @@ -30,7 +30,6 @@ import ( // - It does not include subqueries. // - It does not include non-immutable, aggregate, window, or set returning // functions. -// func ValidateUniqueWithoutIndexPredicate( ctx context.Context, tn tree.TableName, diff --git a/pkg/sql/catalog/seqexpr/sequence.go b/pkg/sql/catalog/seqexpr/sequence.go index 2c79b4afca13..f72fd9f35489 100644 --- a/pkg/sql/catalog/seqexpr/sequence.go +++ b/pkg/sql/catalog/seqexpr/sequence.go @@ -304,27 +304,35 @@ func seqNameToIDMappingInExpr( // one that matches all parts of `targetTableName`, if that part exists // in both names. 
// Example 1: -// allTableNamesByID = {23 : 'db.sc1.t', 25 : 'db.sc2.t'} -// tableName = 'sc2.t' -// return = 25 (because `db.sc2.t` best-matches `sc2.t`) +// +// allTableNamesByID = {23 : 'db.sc1.t', 25 : 'db.sc2.t'} +// tableName = 'sc2.t' +// return = 25 (because `db.sc2.t` best-matches `sc2.t`) +// // Example 2: -// allTableNamesByID = {23 : 'db.sc1.t', 25 : 'sc2.t'} -// tableName = 'sc2.t' -// return = 25 (because `sc2.t` best-matches `sc2.t`) +// +// allTableNamesByID = {23 : 'db.sc1.t', 25 : 'sc2.t'} +// tableName = 'sc2.t' +// return = 25 (because `sc2.t` best-matches `sc2.t`) +// // Example 3: -// allTableNamesByID = {23 : 'db.sc1.t', 25 : 'sc2.t'} -// tableName = 'db.sc2.t' -// return = 25 (because `sc2.t` best-matches `db.sc2.t`) +// +// allTableNamesByID = {23 : 'db.sc1.t', 25 : 'sc2.t'} +// tableName = 'db.sc2.t' +// return = 25 (because `sc2.t` best-matches `db.sc2.t`) // // Example 4: -// allTableNamesByID = {23 : 'sc1.t', 25 : 'sc2.t'} +// +// allTableNamesByID = {23 : 'sc1.t', 25 : 'sc2.t'} // tableName = 't' // return = non-nil error (because both 'sc1.t' and 'sc2.t' are equally good matches // for 't' and we cannot decide, i.e., >1 valid candidates left.) +// // Example 5: -// allTableNamesByID = {23 : 'sc1.t', 25 : 'sc2.t'} -// tableName = 't2' -// return = non-nil error (because neither 'sc1.t' nor 'sc2.t' matches 't2', that is, 0 valid candidate left) +// +// allTableNamesByID = {23 : 'sc1.t', 25 : 'sc2.t'} +// tableName = 't2' +// return = non-nil error (because neither 'sc1.t' nor 'sc2.t' matches 't2', that is, 0 valid candidate left) func findUniqueBestMatchingForTableName( allTableNamesByID map[descpb.ID]*tree.TableName, targetTableName tree.TableName, ) (match descpb.ID, err error) { diff --git a/pkg/sql/catalog/table_col_map.go b/pkg/sql/catalog/table_col_map.go index 29e723bc3ff8..0651454f5496 100644 --- a/pkg/sql/catalog/table_col_map.go +++ b/pkg/sql/catalog/table_col_map.go @@ -95,7 +95,9 @@ func (s *TableColMap) ForEach(f func(colID descpb.ColumnID, returnIndex int)) { } // String prints out the contents of the map in the following format: -// map[key1:val1 key2:val2 ...] +// +// map[key1:val1 key2:val2 ...] +// // The keys are in ascending order. func (s *TableColMap) String() string { var buf bytes.Buffer diff --git a/pkg/sql/catalog/tabledesc/index.go b/pkg/sql/catalog/tabledesc/index.go index 36c41982b78c..aece5c9b65e5 100644 --- a/pkg/sql/catalog/tabledesc/index.go +++ b/pkg/sql/catalog/tabledesc/index.go @@ -395,10 +395,9 @@ func (w index) UseDeletePreservingEncoding() bool { // ForcePut returns true if writes to the index should only use Put (rather than // CPut or InitPut). This is used by: // -// * indexes currently being built by the MVCC-compliant index backfiller, and -// * the temporary indexes that support that process, and -// * old primary indexes which are being dropped. -// +// - indexes currently being built by the MVCC-compliant index backfiller, and +// - the temporary indexes that support that process, and +// - old primary indexes which are being dropped. 
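Returning to the findUniqueBestMatchingForTableName examples earlier in this hunk, here is a simplified, self-contained Go sketch of the best-match rule they describe: two names match if they agree on every part present in both, and the lookup fails unless exactly one candidate matches. The qualifiedName type is illustrative (not tree.TableName), and the real function is more involved.

	package main

	import (
		"errors"
		"fmt"
	)

	// qualifiedName is an illustrative stand-in for tree.TableName; empty parts
	// mean "not specified".
	type qualifiedName struct {
		db, schema, object string
	}

	// partsMatch reports whether a and b agree on every part that is present in
	// both names.
	func partsMatch(a, b qualifiedName) bool {
		if a.db != "" && b.db != "" && a.db != b.db {
			return false
		}
		if a.schema != "" && b.schema != "" && a.schema != b.schema {
			return false
		}
		return a.object == b.object
	}

	// findUniqueBestMatch returns the ID of the unique stored name matching
	// target, or an error if zero or more than one candidate matches.
	func findUniqueBestMatch(byID map[int]qualifiedName, target qualifiedName) (int, error) {
		matchID, matches := 0, 0
		for id, name := range byID {
			if partsMatch(name, target) {
				matchID, matches = id, matches+1
			}
		}
		if matches != 1 {
			return 0, errors.New("zero or multiple matching table names")
		}
		return matchID, nil
	}

	func main() {
		byID := map[int]qualifiedName{
			23: {"db", "sc1", "t"},
			25: {"db", "sc2", "t"},
		}
		id, err := findUniqueBestMatch(byID, qualifiedName{schema: "sc2", object: "t"})
		fmt.Println(id, err) // 25 <nil>, as in Example 1 above
	}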
func (w index) ForcePut() bool { return w.Merging() || w.desc.UseDeletePreservingEncoding || w.Dropped() && w.IsUnique() && w.GetEncodingType() == descpb.PrimaryIndexEncoding diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index 159f93d93ec1..65b7c706085b 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -129,15 +129,14 @@ func (desc *wrapper) IndexKeysPerRow(idx catalog.Index) int { // of tableDesc's indexes, roughly following Postgres's conventions for naming // anonymous indexes. For example: // -// CREATE INDEX ON t (a) -// => t_a_idx +// CREATE INDEX ON t (a) +// => t_a_idx // -// CREATE UNIQUE INDEX ON t (a, b) -// => t_a_b_key -// -// CREATE INDEX ON t ((a + b), c, lower(d)) -// => t_expr_c_expr1_idx +// CREATE UNIQUE INDEX ON t (a, b) +// => t_a_b_key // +// CREATE INDEX ON t ((a + b), c, lower(d)) +// => t_expr_c_expr1_idx func BuildIndexName(tableDesc *Mutable, idx *descpb.IndexDescriptor) (string, error) { // An index name has a segment for the table name, each key column, and a // final word (either "idx" or "key"). @@ -953,7 +952,7 @@ func (desc *Mutable) allocateColumnFamilyIDs(columnNames map[string]descpb.Colum // should be put in a new family. // // Current heuristics: -// - Put all columns in family 0. +// - Put all columns in family 0. func fitColumnToFamily(desc *Mutable, col descpb.ColumnDescriptor) (int, bool) { // Fewer column families means fewer kv entries, which is generally faster. // On the other hand, an update to any column in a family requires that they @@ -1609,12 +1608,17 @@ func (desc *wrapper) IsPrimaryIndexDefaultRowID() bool { // MakeMutationComplete updates the descriptor upon completion of a mutation. // There are three Validity types for the mutations: // Validated - The constraint has already been added and validated, should -// never be the case for a validated constraint to enter this -// method. +// +// never be the case for a validated constraint to enter this +// method. +// // Validating - The constraint has already been added, and just needs to be -// marked as validated. +// +// marked as validated. +// // Unvalidated - The constraint has not yet been added, and needs to be added -// for the first time. +// +// for the first time. func (desc *Mutable) MakeMutationComplete(m descpb.DescriptorMutation) error { switch m.Direction { case descpb.DescriptorMutation_ADD: diff --git a/pkg/sql/catalog/tabledesc/table_desc.go b/pkg/sql/catalog/tabledesc/table_desc.go index e264ab58234f..129846e81fbe 100644 --- a/pkg/sql/catalog/tabledesc/table_desc.go +++ b/pkg/sql/catalog/tabledesc/table_desc.go @@ -300,10 +300,10 @@ func (desc *wrapper) ActiveIndexes() []catalog.Index { // NonDropIndexes returns a slice of all non-drop indexes in the underlying // proto, in their canonical order. This means: -// - the primary index, if the table is a physical table, -// - the public non-primary indexes in the Indexes array, in order, -// - the non-public indexes present in the Mutations array, in order, -// if the mutation is not a drop. +// - the primary index, if the table is a physical table, +// - the public non-primary indexes in the Indexes array, in order, +// - the non-public indexes present in the Mutations array, in order, +// if the mutation is not a drop. // // See also catalog.Index.Ordinal(). 
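The anonymous-index naming convention documented for BuildIndexName in the tabledesc/structured.go hunk above can be sketched as follows. This is an assumption-laden illustration, not the real implementation, which additionally guarantees uniqueness against the table's existing index names.

	package main

	import (
		"fmt"
		"strings"
	)

	// buildIndexName joins one segment per key element ("expr", "expr1", ... for
	// expression elements), ending in "key" for unique indexes and "idx"
	// otherwise.
	func buildIndexName(table string, elements []string, isExpression []bool, unique bool) string {
		segments := []string{table}
		numExprs := 0
		for i, el := range elements {
			if isExpression[i] {
				seg := "expr"
				if numExprs > 0 {
					seg = fmt.Sprintf("expr%d", numExprs)
				}
				numExprs++
				segments = append(segments, seg)
				continue
			}
			segments = append(segments, el)
		}
		if unique {
			segments = append(segments, "key")
		} else {
			segments = append(segments, "idx")
		}
		return strings.Join(segments, "_")
	}

	func main() {
		fmt.Println(buildIndexName("t", []string{"a"}, []bool{false}, false))                                  // t_a_idx
		fmt.Println(buildIndexName("t", []string{"a", "b"}, []bool{false, false}, true))                       // t_a_b_key
		fmt.Println(buildIndexName("t", []string{"a + b", "c", "lower(d)"}, []bool{true, false, true}, false)) // t_expr_c_expr1_idx
	}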
func (desc *wrapper) NonDropIndexes() []catalog.Index { diff --git a/pkg/sql/check.go b/pkg/sql/check.go index 3e6cfff6118f..200c2214260f 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -86,7 +86,9 @@ func validateCheckExpr( // // SELECT s.a_id, s.b_id, s.pk1, s.pk2 FROM child@c_idx // WHERE -// (a_id IS NULL OR b_id IS NULL) AND (a_id IS NOT NULL OR b_id IS NOT NULL) +// +// (a_id IS NULL OR b_id IS NULL) AND (a_id IS NOT NULL OR b_id IS NOT NULL) +// // LIMIT 1; func matchFullUnacceptableKeyQuery( srcTbl catalog.TableDescriptor, fk *descpb.ForeignKeyConstraint, limitResults bool, @@ -150,12 +152,14 @@ func matchFullUnacceptableKeyQuery( // query: // // SELECT s.a_id, s.b_id, s.rowid -// FROM ( -// SELECT a_id, b_id, rowid -// FROM [ AS src]@{IGNORE_FOREIGN_KEYS} -// WHERE a_id IS NOT NULL AND b_id IS NOT NULL -// ) AS s -// LEFT JOIN [ AS target] AS t ON s.a_id = t.a AND s.b_id = t.b +// +// FROM ( +// SELECT a_id, b_id, rowid +// FROM [ AS src]@{IGNORE_FOREIGN_KEYS} +// WHERE a_id IS NOT NULL AND b_id IS NOT NULL +// ) AS s +// LEFT JOIN [ AS target] AS t ON s.a_id = t.a AND s.b_id = t.b +// // WHERE t.a IS NULL // LIMIT 1 -- if limitResults is set // diff --git a/pkg/sql/colconv/vec_to_datum.eg.go b/pkg/sql/colconv/vec_to_datum.eg.go index cec9fe2eb933..5a4f17d45bc1 100644 --- a/pkg/sql/colconv/vec_to_datum.eg.go +++ b/pkg/sql/colconv/vec_to_datum.eg.go @@ -94,10 +94,10 @@ func getNewVecToDatumConverter(batchWidth int, willRelease bool) *VecToDatumConv } // NewVecToDatumConverter creates a new VecToDatumConverter. -// - batchWidth determines the width of the batches that it will be converting. -// - vecIdxsToConvert determines which vectors need to be converted. -// - willRelease indicates whether the caller intends to call Release() on the -// converter. +// - batchWidth determines the width of the batches that it will be converting. +// - vecIdxsToConvert determines which vectors need to be converted. +// - willRelease indicates whether the caller intends to call Release() on the +// converter. func NewVecToDatumConverter( batchWidth int, vecIdxsToConvert []int, willRelease bool, ) *VecToDatumConverter { diff --git a/pkg/sql/colconv/vec_to_datum_tmpl.go b/pkg/sql/colconv/vec_to_datum_tmpl.go index 30640491d1ba..3e6309954189 100644 --- a/pkg/sql/colconv/vec_to_datum_tmpl.go +++ b/pkg/sql/colconv/vec_to_datum_tmpl.go @@ -104,10 +104,10 @@ func getNewVecToDatumConverter(batchWidth int, willRelease bool) *VecToDatumConv } // NewVecToDatumConverter creates a new VecToDatumConverter. -// - batchWidth determines the width of the batches that it will be converting. -// - vecIdxsToConvert determines which vectors need to be converted. -// - willRelease indicates whether the caller intends to call Release() on the -// converter. +// - batchWidth determines the width of the batches that it will be converting. +// - vecIdxsToConvert determines which vectors need to be converted. +// - willRelease indicates whether the caller intends to call Release() on the +// converter. 
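Stepping back to the matchFullUnacceptableKeyQuery comment in the pkg/sql/check.go hunk above: the predicate it builds flags rows where some, but not all, of the composite foreign-key columns are NULL. Below is a tiny Go sketch of that MATCH FULL check, using *string as a stand-in for nullable columns rather than the SQL layer's actual representation.

	package main

	import "fmt"

	// violatesMatchFull reports whether a row violates MATCH FULL semantics for
	// a composite foreign key: some, but not all, of its FK columns are NULL.
	func violatesMatchFull(fkCols []*string) bool {
		nulls := 0
		for _, c := range fkCols {
			if c == nil {
				nulls++
			}
		}
		return nulls > 0 && nulls < len(fkCols)
	}

	func main() {
		a, b := "a1", "b1"
		fmt.Println(violatesMatchFull([]*string{&a, &b}))   // false: fully set
		fmt.Println(violatesMatchFull([]*string{nil, nil})) // false: fully NULL
		fmt.Println(violatesMatchFull([]*string{&a, nil}))  // true: partially NULL
	}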
func NewVecToDatumConverter( batchWidth int, vecIdxsToConvert []int, willRelease bool, ) *VecToDatumConverter { diff --git a/pkg/sql/colexec/aggregators_util.go b/pkg/sql/colexec/aggregators_util.go index f1eed1a6bb5a..9fee773c8fc5 100644 --- a/pkg/sql/colexec/aggregators_util.go +++ b/pkg/sql/colexec/aggregators_util.go @@ -410,17 +410,17 @@ func newFilteringDistinctHashAggregatorHelper( // paying attention to distinct tuples if the corresponding function performs // DISTINCT aggregation (as well as to any present FILTER clauses). // For such functions the approach is as follows: -// 1. Store the input state because we will be modifying some of it. -// 2. Convert all aggregate columns of functions that perform DISTINCT -// aggregation. -// 3. For every function: -// 1) Apply the filter to the selection vector of the input. -// 2) Update the (possibly updated) selection vector to include only tuples -// we haven't yet seen making sure to remember that new tuples we have -// just seen. -// 3) Execute Compute on the updated state. -// 4) Restore the state to the original state (if it might have been -// modified). +// 1. Store the input state because we will be modifying some of it. +// 2. Convert all aggregate columns of functions that perform DISTINCT +// aggregation. +// 3. For every function: +// 1. Apply the filter to the selection vector of the input. +// 2. Update the (possibly updated) selection vector to include only tuples +// we haven't yet seen making sure to remember that new tuples we have +// just seen. +// 3. Execute Compute on the updated state. +// 4. Restore the state to the original state (if it might have been +// modified). func (h *filteringDistinctHashAggregatorHelper) performAggregation( ctx context.Context, vecs []coldata.Vec, inputLen int, sel []int, bucket *aggBucket, _ []bool, ) { diff --git a/pkg/sql/colexec/case.go b/pkg/sql/colexec/case.go index ee4ef7232b64..c98272c832ea 100644 --- a/pkg/sql/colexec/case.go +++ b/pkg/sql/colexec/case.go @@ -127,9 +127,11 @@ func (c *caseOp) Child(nth int, verbose bool) execopnode.OpNode { // NewCaseOp returns an operator that runs a case statement. // buffer is a bufferOp that will return the input batch repeatedly. // caseOps is a list of operator chains, one per branch in the case statement. -// Each caseOp is connected to the input buffer op, and filters the input based -// on the case arm's WHEN condition, and then projects the remaining selected -// tuples based on the case arm's THEN condition. +// +// Each caseOp is connected to the input buffer op, and filters the input based +// on the case arm's WHEN condition, and then projects the remaining selected +// tuples based on the case arm's THEN condition. +// // elseOp is the ELSE condition. // whenCol is the index into the input batch to read from. // thenCol is the index into the output batch to write to. diff --git a/pkg/sql/colexec/colexecbase/cast.eg.go b/pkg/sql/colexec/colexecbase/cast.eg.go index f9b18905948e..d55ce1850367 100644 --- a/pkg/sql/colexec/colexecbase/cast.eg.go +++ b/pkg/sql/colexec/colexecbase/cast.eg.go @@ -1188,12 +1188,12 @@ func (c *castOpNullAny) Next() coldata.Batch { // column into the output column, without performing the deselection step. Not // performing the deselection is justified by the following: // -// 1. to be in line with other cast operators -// 2. AND/OR projection operators cannot handle when a different batch is -// returned than the one they fed into the projection chain (which might -// contain casts) -// 3. 
performing the deselection would require copying over all vectors, not -// just the output one. +// 1. to be in line with other cast operators +// 2. AND/OR projection operators cannot handle when a different batch is +// returned than the one they fed into the projection chain (which might +// contain casts) +// 3. performing the deselection would require copying over all vectors, not +// just the output one. // // This operator should be planned rarely enough (if ever) to not be very // important. diff --git a/pkg/sql/colexec/colexecbase/cast_tmpl.go b/pkg/sql/colexec/colexecbase/cast_tmpl.go index 7f705641c44f..151fa7e146af 100644 --- a/pkg/sql/colexec/colexecbase/cast_tmpl.go +++ b/pkg/sql/colexec/colexecbase/cast_tmpl.go @@ -294,12 +294,12 @@ func (c *castOpNullAny) Next() coldata.Batch { // column into the output column, without performing the deselection step. Not // performing the deselection is justified by the following: // -// 1. to be in line with other cast operators -// 2. AND/OR projection operators cannot handle when a different batch is -// returned than the one they fed into the projection chain (which might -// contain casts) -// 3. performing the deselection would require copying over all vectors, not -// just the output one. +// 1. to be in line with other cast operators +// 2. AND/OR projection operators cannot handle when a different batch is +// returned than the one they fed into the projection chain (which might +// contain casts) +// 3. performing the deselection would require copying over all vectors, not +// just the output one. // // This operator should be planned rarely enough (if ever) to not be very // important. diff --git a/pkg/sql/colexec/colexecbase/simple_project_test.go b/pkg/sql/colexec/colexecbase/simple_project_test.go index 7da4e9684966..48f45348c524 100644 --- a/pkg/sql/colexec/colexecbase/simple_project_test.go +++ b/pkg/sql/colexec/colexecbase/simple_project_test.go @@ -91,9 +91,9 @@ func TestSimpleProjectOp(t *testing.T) { // TestSimpleProjectOpWithUnorderedSynchronizer sets up the following // structure: // -// input 1 -- -// | --> unordered synchronizer --> simpleProjectOp --> constInt64Op -// input 2 -- +// input 1 -- +// | --> unordered synchronizer --> simpleProjectOp --> constInt64Op +// input 2 -- // // and makes sure that the output is as expected. The idea is to test // simpleProjectOp in case when it receives multiple "different internally" diff --git a/pkg/sql/colexec/colexecdisk/disk_spiller.go b/pkg/sql/colexec/colexecdisk/disk_spiller.go index 448485471ca0..a3177026c5c4 100644 --- a/pkg/sql/colexec/colexecdisk/disk_spiller.go +++ b/pkg/sql/colexec/colexecdisk/disk_spiller.go @@ -60,18 +60,18 @@ import ( // NewOneInputDiskSpiller returns a new oneInputDiskSpiller. It takes the // following arguments: -// - inMemoryOp - the in-memory operator that will be consuming input and doing -// computations until it either successfully processes the whole input or -// reaches its memory limit. -// - inMemoryMemMonitorName - the name of the memory monitor of the in-memory -// operator. diskSpiller will catch an OOM error only if this name is -// contained within the error message. -// - diskBackedOpConstructor - the function to construct the disk-backed -// operator when given an input operator. We take in a constructor rather -// than an already created operator in order to hide the complexity of buffer -// exporting operator that serves as the input to the disk-backed operator. 
-// - spillingCallbackFn will be called when the spilling from in-memory to disk -// backed operator occurs. It should only be set in tests. +// - inMemoryOp - the in-memory operator that will be consuming input and doing +// computations until it either successfully processes the whole input or +// reaches its memory limit. +// - inMemoryMemMonitorName - the name of the memory monitor of the in-memory +// operator. diskSpiller will catch an OOM error only if this name is +// contained within the error message. +// - diskBackedOpConstructor - the function to construct the disk-backed +// operator when given an input operator. We take in a constructor rather +// than an already created operator in order to hide the complexity of buffer +// exporting operator that serves as the input to the disk-backed operator. +// - spillingCallbackFn will be called when the spilling from in-memory to disk +// backed operator occurs. It should only be set in tests. func NewOneInputDiskSpiller( input colexecop.Operator, inMemoryOp colexecop.BufferingInMemoryOperator, @@ -127,18 +127,18 @@ func NewOneInputDiskSpiller( // NewTwoInputDiskSpiller returns a new twoInputDiskSpiller. It takes the // following arguments: -// - inMemoryOp - the in-memory operator that will be consuming inputs and -// doing computations until it either successfully processes the whole inputs -// or reaches its memory limit. -// - inMemoryMemMonitorName - the name of the memory monitor of the in-memory -// operator. diskSpiller will catch an OOM error only if this name is -// contained within the error message. -// - diskBackedOpConstructor - the function to construct the disk-backed -// operator when given two input operators. We take in a constructor rather -// than an already created operator in order to hide the complexity of buffer -// exporting operators that serves as inputs to the disk-backed operator. -// - spillingCallbackFn will be called when the spilling from in-memory to disk -// backed operator occurs. It should only be set in tests. +// - inMemoryOp - the in-memory operator that will be consuming inputs and +// doing computations until it either successfully processes the whole inputs +// or reaches its memory limit. +// - inMemoryMemMonitorName - the name of the memory monitor of the in-memory +// operator. diskSpiller will catch an OOM error only if this name is +// contained within the error message. +// - diskBackedOpConstructor - the function to construct the disk-backed +// operator when given two input operators. We take in a constructor rather +// than an already created operator in order to hide the complexity of buffer +// exporting operators that serves as inputs to the disk-backed operator. +// - spillingCallbackFn will be called when the spilling from in-memory to disk +// backed operator occurs. It should only be set in tests. func NewTwoInputDiskSpiller( inputOne, inputTwo colexecop.Operator, inMemoryOp colexecop.BufferingInMemoryOperator, diff --git a/pkg/sql/colexec/colexecdisk/external_sort.go b/pkg/sql/colexec/colexecdisk/external_sort.go index a4931d219a2b..d383d48f8ea7 100644 --- a/pkg/sql/colexec/colexecdisk/external_sort.go +++ b/pkg/sql/colexec/colexecdisk/external_sort.go @@ -78,27 +78,27 @@ const ( // // The (simplified) diagram of the components involved is as follows: // -// input -// | -// ↓ -// input partitioner -// | -// ↓ -// in-memory sorter -// | -// ↓ -// ------------------------------------------ -// | external sorter | -// | --------------- | -// | | -// | partition1 partition2 ... 
partitionN | -// | | | | | -// | ↓ ↓ ↓ | -// | merger (ordered synchronizer) | -// ------------------------------------------ -// | -// ↓ -// output +// input +// | +// ↓ +// input partitioner +// | +// ↓ +// in-memory sorter +// | +// ↓ +// ------------------------------------------ +// | external sorter | +// | --------------- | +// | | +// | partition1 partition2 ... partitionN | +// | | | | | +// | ↓ ↓ ↓ | +// | merger (ordered synchronizer) | +// ------------------------------------------ +// | +// ↓ +// output // // There are a couple of implicit upstream links in the setup: // - input partitioner checks the allocator used by the in-memory sorter to see diff --git a/pkg/sql/colexec/colexechash/hash.go b/pkg/sql/colexec/colexechash/hash.go index 59140cb3f344..efd3104d58ee 100644 --- a/pkg/sql/colexec/colexechash/hash.go +++ b/pkg/sql/colexec/colexechash/hash.go @@ -55,6 +55,7 @@ func readUnaligned64(p unsafe.Pointer) uint64 { } // Should be a built-in for unsafe.Pointer? +// //go:nosplit func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(p) + x) @@ -66,6 +67,7 @@ func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { // output depends on the input. noescape is inlined and currently // compiles down to zero instructions. // USE CAREFULLY! +// //go:nosplit func noescape(p unsafe.Pointer) unsafe.Pointer { x := uintptr(p) diff --git a/pkg/sql/colexec/colexechash/hashtable.go b/pkg/sql/colexec/colexechash/hashtable.go index 17f388a36b17..6e24b66e1281 100644 --- a/pkg/sql/colexec/colexechash/hashtable.go +++ b/pkg/sql/colexec/colexechash/hashtable.go @@ -66,7 +66,8 @@ const ( // // For each tuple with the ordinal 'i' (the ordinal among all tuples in the // hash table or within a single probing batch), keyID is calculated as: -// keyID = i + 1. +// +// keyID = i + 1. // // keyID of 0 is reserved to indicate the end of the hash chain. type keyID = uint64 @@ -508,114 +509,118 @@ func (ht *HashTable) FullBuild(input colexecop.Operator) { // // Let's go through an example of how this function works: our input stream // contains the following tuples: -// {-6}, {-6}, {-7}, {-5}, {-8}, {-5}, {-5}, {-8}. +// +// {-6}, {-6}, {-7}, {-5}, {-8}, {-5}, {-5}, {-8}. +// // (Note that negative values are chosen in order to visually distinguish them // from the IDs that we'll be working with below.) // We will use coldata.BatchSize() == 4 and let's assume that we will use a // hash function -// h(-5) = 1, h(-6) = 1, h(-7) = 0, h(-8) = 0 +// +// h(-5) = 1, h(-6) = 1, h(-7) = 0, h(-8) = 0 +// // with two buckets in the hash table. // // I. we get a batch [-6, -6, -7, -5]. -// 1. ComputeHashAndBuildChains: -// a) compute hash buckets: -// HashBuffer = [1, 1, 0, 1] -// ProbeScratch.Next = [reserved, 1, 1, 0, 1] -// b) build 'Next' chains between hash buckets: -// ProbeScratch.First = [3, 1] (length of First == # of hash buckets) -// ProbeScratch.Next = [reserved, 2, 4, 0, 0] -// (Note that we have a hash collision in the bucket with hash 1.) -// 2. 
RemoveDuplicates within the batch: -// 1) first iteration in FindBuckets: -// a) all 4 tuples included to be checked against heads of their hash -// chains: -// ToCheck = [0, 1, 2, 3] -// ToCheckID = [1, 1, 3, 1] -// b) after performing the equality check using CheckProbeForDistinct, -// tuples 0, 1, 2 are found to be equal to the heads of their hash -// chains while tuple 3 (-5) has a hash collision with tuple 0 (-6), -// so it is kept for another iteration: -// ToCheck = [3] -// ToCheckID = [x, x, x, 2] -// HeadID = [1, 1, 3, x] -// 2) second iteration in FindBuckets finds that tuple 3 (-5) again has a -// hash collision with tuple 1 (-6), so it is kept for another -// iteration: -// ToCheck = [3] -// ToCheckID = [x, x, x, 4] -// HeadID = [1, 1, 3, x] -// 3) third iteration finds a match for tuple (the tuple itself), no more -// tuples to check, so the iterations stop: -// ToCheck = [] -// HeadID = [1, 1, 3, 4] -// 4) the duplicates are represented by having the same HeadID values, and -// all duplicates are removed in updateSel: -// batch = [-6, -6, -7, -5] -// length = 3, sel = [0, 2, 3] -// Notably, HashBuffer is compacted accordingly: -// HashBuffer = [1, 0, 1] -// 3. The hash table is empty, so RemoveDuplicates against the hash table is -// skipped. -// 4. All 3 tuples still present in the batch are distinct, they are appended -// to the hash table and will be emitted to the output: -// Vals = [-6, -7, -5] -// BuildScratch.First = [2, 1] -// BuildScratch.Next = [reserved, 3, 0, 0] -// We have fully processed the first batch. +// 1. ComputeHashAndBuildChains: +// a) compute hash buckets: +// HashBuffer = [1, 1, 0, 1] +// ProbeScratch.Next = [reserved, 1, 1, 0, 1] +// b) build 'Next' chains between hash buckets: +// ProbeScratch.First = [3, 1] (length of First == # of hash buckets) +// ProbeScratch.Next = [reserved, 2, 4, 0, 0] +// (Note that we have a hash collision in the bucket with hash 1.) +// 2. RemoveDuplicates within the batch: +// 1. first iteration in FindBuckets: +// a) all 4 tuples included to be checked against heads of their hash +// chains: +// ToCheck = [0, 1, 2, 3] +// ToCheckID = [1, 1, 3, 1] +// b) after performing the equality check using CheckProbeForDistinct, +// tuples 0, 1, 2 are found to be equal to the heads of their hash +// chains while tuple 3 (-5) has a hash collision with tuple 0 (-6), +// so it is kept for another iteration: +// ToCheck = [3] +// ToCheckID = [x, x, x, 2] +// HeadID = [1, 1, 3, x] +// 2. second iteration in FindBuckets finds that tuple 3 (-5) again has a +// hash collision with tuple 1 (-6), so it is kept for another +// iteration: +// ToCheck = [3] +// ToCheckID = [x, x, x, 4] +// HeadID = [1, 1, 3, x] +// 3. third iteration finds a match for tuple (the tuple itself), no more +// tuples to check, so the iterations stop: +// ToCheck = [] +// HeadID = [1, 1, 3, 4] +// 4. the duplicates are represented by having the same HeadID values, and +// all duplicates are removed in updateSel: +// batch = [-6, -6, -7, -5] +// length = 3, sel = [0, 2, 3] +// Notably, HashBuffer is compacted accordingly: +// HashBuffer = [1, 0, 1] +// 3. The hash table is empty, so RemoveDuplicates against the hash table is +// skipped. +// 4. All 3 tuples still present in the batch are distinct, they are appended +// to the hash table and will be emitted to the output: +// Vals = [-6, -7, -5] +// BuildScratch.First = [2, 1] +// BuildScratch.Next = [reserved, 3, 0, 0] +// We have fully processed the first batch. // // II. we get a batch [-8, -5, -5, -8]. -// 1. 
ComputeHashAndBuildChains: -// a) compute hash buckets: -// HashBuffer = [0, 1, 1, 0] -// ProbeScratch.Next = [reserved, 0, 1, 1, 0] -// b) build 'Next' chains between hash buckets: -// ProbeScratch.First = [1, 2] -// ProbeScratch.Next = [reserved, 4, 3, 0, 0] -// 2. RemoveDuplicates within the batch: -// 1) first iteration in FindBuckets: -// a) all 4 tuples included to be checked against heads of their hash -// chains: -// ToCheck = [0, 1, 2, 3] -// ToCheckID = [1, 2, 2, 1] -// b) after performing the equality check using CheckProbeForDistinct, -// all tuples are found to be equal to the heads of their hash -// chains, no more tuples to check, so the iterations stop: -// ToCheck = [] -// HeadID = [1, 2, 2, 1] -// 2) the duplicates are represented by having the same HeadID values, and -// all duplicates are removed in updateSel: -// batch = [-8, -5, -5, -8] -// length = 2, sel = [0, 1] -// Notably, HashBuffer is compacted accordingly: -// HashBuffer = [0, 1] -// 3. RemoveDuplicates against the hash table: -// 1) first iteration in FindBuckets: -// a) both tuples included to be checked against heads of their hash -// chains of the hash table (meaning BuildScratch.First and -// BuildScratch.Next are used to populate ToCheckID values): -// ToCheck = [0, 1] -// ToCheckID = [2, 1] -// b) after performing the equality check using CheckBuildForDistinct, -// both tuples are found to have hash collisions (-8 with -7 and -5 -// with -6), so both are kept for another iteration: -// ToCheck = [0, 1] -// ToCheckID = [0, 2] -// 2) second iteration in FindBuckets finds that tuple 1 (-5) has a match -// whereas tuple 0 (-8) is distinct (because its ToCheckID is 0), no -// more tuples to check: -// ToCheck = [] -// HeadID = [1, 0] -// 3) duplicates are represented by having HeadID value of 0, so the batch -// is updated to only include tuple -8: -// batch = [-8, -5, -5, -8] -// length = 1, sel = [0] -// HashBuffer = [0] -// 4. The single tuple still present in the batch is distinct, it is appended -// to the hash table and will be emitted to the output: -// Vals = [-6, -7, -5, -8] -// BuildScratch.First = [2, 1] -// BuildScratch.Next = [reserved, 3, 4, 0, 0] -// We have fully processed the second batch and the input as a whole. +// 1. ComputeHashAndBuildChains: +// a) compute hash buckets: +// HashBuffer = [0, 1, 1, 0] +// ProbeScratch.Next = [reserved, 0, 1, 1, 0] +// b) build 'Next' chains between hash buckets: +// ProbeScratch.First = [1, 2] +// ProbeScratch.Next = [reserved, 4, 3, 0, 0] +// 2. RemoveDuplicates within the batch: +// 1. first iteration in FindBuckets: +// a) all 4 tuples included to be checked against heads of their hash +// chains: +// ToCheck = [0, 1, 2, 3] +// ToCheckID = [1, 2, 2, 1] +// b) after performing the equality check using CheckProbeForDistinct, +// all tuples are found to be equal to the heads of their hash +// chains, no more tuples to check, so the iterations stop: +// ToCheck = [] +// HeadID = [1, 2, 2, 1] +// 2. the duplicates are represented by having the same HeadID values, and +// all duplicates are removed in updateSel: +// batch = [-8, -5, -5, -8] +// length = 2, sel = [0, 1] +// Notably, HashBuffer is compacted accordingly: +// HashBuffer = [0, 1] +// 3. RemoveDuplicates against the hash table: +// 1. 
first iteration in FindBuckets: +// a) both tuples included to be checked against heads of their hash +// chains of the hash table (meaning BuildScratch.First and +// BuildScratch.Next are used to populate ToCheckID values): +// ToCheck = [0, 1] +// ToCheckID = [2, 1] +// b) after performing the equality check using CheckBuildForDistinct, +// both tuples are found to have hash collisions (-8 with -7 and -5 +// with -6), so both are kept for another iteration: +// ToCheck = [0, 1] +// ToCheckID = [0, 2] +// 2. second iteration in FindBuckets finds that tuple 1 (-5) has a match +// whereas tuple 0 (-8) is distinct (because its ToCheckID is 0), no +// more tuples to check: +// ToCheck = [] +// HeadID = [1, 0] +// 3. duplicates are represented by having HeadID value of 0, so the batch +// is updated to only include tuple -8: +// batch = [-8, -5, -5, -8] +// length = 1, sel = [0] +// HashBuffer = [0] +// 4. The single tuple still present in the batch is distinct, it is appended +// to the hash table and will be emitted to the output: +// Vals = [-6, -7, -5, -8] +// BuildScratch.First = [2, 1] +// BuildScratch.Next = [reserved, 3, 4, 0, 0] +// We have fully processed the second batch and the input as a whole. // // NOTE: b *must* be a non-zero length batch. func (ht *HashTable) DistinctBuild(batch coldata.Batch) { diff --git a/pkg/sql/colexec/colexecjoin/hashjoiner.go b/pkg/sql/colexec/colexecjoin/hashjoiner.go index e7cd552f96dc..d7ef556d1f76 100644 --- a/pkg/sql/colexec/colexecjoin/hashjoiner.go +++ b/pkg/sql/colexec/colexecjoin/hashjoiner.go @@ -95,12 +95,12 @@ type hashJoinerSourceSpec struct { // // In the vectorized implementation of the build phase, the following tasks are // performed: -// 1. The bucket number (hash value) of each key tuple is computed and stored -// into a buckets array. -// 2. The values in the buckets array is normalized to fit within the hash table -// numBuckets. -// 3. The bucket-chaining hash table organization is prepared with the computed -// buckets. +// 1. The bucket number (hash value) of each key tuple is computed and stored +// into a buckets array. +// 2. The values in the buckets array is normalized to fit within the hash table +// numBuckets. +// 3. The bucket-chaining hash table organization is prepared with the computed +// buckets. // // Depending on the value of the spec.rightDistinct flag, there are two // variations of the probe phase. The planner will set rightDistinct to true if @@ -109,54 +109,54 @@ type hashJoinerSourceSpec struct { // In the columnarized implementation of the distinct build table probe phase, // the following tasks are performed by the fastProbe function: // -// 1. Compute the bucket number for each probe row's key tuple and store the -// results into the buckets array. -// 2. In order to find the position of these key tuples in the hash table: -// - First find the first element in the bucket's linked list for each key tuple -// and store it in the ToCheckID array. Initialize the ToCheck array with the -// full sequence of input indices (0...batchSize - 1). -// - While ToCheck is not empty, each element in ToCheck represents a position -// of the key tuples for which the key has not yet been found in the hash -// table. Perform a multi-column equality check to see if the key columns -// match that of the build table's key columns at ToCheckID. -// - Update the differs array to store whether or not the probe's key tuple -// matched the corresponding build's key tuple. 
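As a companion to the build-phase steps and the DistinctBuild walkthrough above, here is a minimal standalone sketch (plain Go, not part of this patch) of the bucket-chaining layout that First and Next describe. The helper name buildChains, the int64 keys, and the toy hash function are illustrative only; the real code builds these chains in a vectorized way, and the append-at-tail order below is simply chosen so that the printed values line up with the walkthrough.

package main

import "fmt"

// buildChains is an illustrative stand-in for the bucket-chaining setup
// described above: all tuples that hash to the same bucket are linked into a
// chain. first[bucket] holds the keyID of the chain head, next[keyID] the
// following keyID, and keyID 0 is reserved to mean "end of chain"
// (keyID = index + 1).
func buildChains(keys []int64, numBuckets int, hash func(int64) int) (first, next []uint64) {
	first = make([]uint64, numBuckets)
	next = make([]uint64, len(keys)+1) // slot 0 is the reserved keyID
	tail := make([]uint64, numBuckets) // last keyID appended to each bucket's chain
	for i, k := range keys {
		keyID := uint64(i + 1)
		b := hash(k) % numBuckets
		if first[b] == 0 {
			first[b] = keyID // empty bucket: this tuple becomes the chain head
		} else {
			next[tail[b]] = keyID // append after the bucket's current tail
		}
		tail[b] = keyID
	}
	return first, next
}

func main() {
	// The first batch of the DistinctBuild walkthrough, with its toy hash
	// function h(-5) = h(-6) = 1, h(-7) = h(-8) = 0 and two buckets.
	keys := []int64{-6, -6, -7, -5}
	h := func(k int64) int {
		if k == -7 || k == -8 {
			return 0
		}
		return 1
	}
	first, next := buildChains(keys, 2, h)
	// Prints [3 1] [0 2 4 0 0], i.e. First = [3, 1] and
	// Next = [reserved, 2, 4, 0, 0] from the walkthrough.
	fmt.Println(first, next)
}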
-// - Select the indices that differed and store them into ToCheck since they -// need to be further processed. -// - For the differing tuples, find the next ID in that bucket of the hash table -// and put it into the ToCheckID array. -// 3. Now, ToCheckID for every probe's key tuple contains the index of the -// matching build's key tuple in the hash table. Use it to project output -// columns from the has table to build the resulting batch. +// 1. Compute the bucket number for each probe row's key tuple and store the +// results into the buckets array. +// 2. In order to find the position of these key tuples in the hash table: +// - First find the first element in the bucket's linked list for each key tuple +// and store it in the ToCheckID array. Initialize the ToCheck array with the +// full sequence of input indices (0...batchSize - 1). +// - While ToCheck is not empty, each element in ToCheck represents a position +// of the key tuples for which the key has not yet been found in the hash +// table. Perform a multi-column equality check to see if the key columns +// match that of the build table's key columns at ToCheckID. +// - Update the differs array to store whether or not the probe's key tuple +// matched the corresponding build's key tuple. +// - Select the indices that differed and store them into ToCheck since they +// need to be further processed. +// - For the differing tuples, find the next ID in that bucket of the hash table +// and put it into the ToCheckID array. +// 3. Now, ToCheckID for every probe's key tuple contains the index of the +// matching build's key tuple in the hash table. Use it to project output +// columns from the has table to build the resulting batch. // // In the columnarized implementation of the non-distinct build table probe // phase, the following tasks are performed by the probe function: // -// 1. Compute the bucket number for each probe row's key tuple and store the -// results into the buckets array. -// 2. In order to find the position of these key tuples in the hash table: -// - First find the first element in the bucket's linked list for each key tuple -// and store it in the ToCheckID array. Initialize the ToCheck array with the -// full sequence of input indices (0...batchSize - 1). -// - While ToCheck is not empty, each element in ToCheck represents a position -// of the key tuples for which the key has not yet been visited by any prior -// probe. Perform a multi-column equality check to see if the key columns -// match that of the build table's key columns at ToCheckID. -// - Update the differs array to store whether or not the probe's key tuple -// matched the corresponding build's key tuple. -// - For the indices that did not differ, we can lazily update the HashTable's -// same linked list to store a list of all identical keys starting at head. -// Once a key has been added to ht.Same, ht.Visited is set to true. For the -// indices that have never been visited, we want to continue checking this -// bucket for identical values by adding this key to ToCheck. -// - Select the indices that differed and store them into ToCheck since they -// need to be further processed. -// - For the differing tuples, find the next ID in that bucket of the hash table -// and put it into the ToCheckID array. -// 3. Now, head stores the keyID of the first match in the build table for every -// probe table key. ht.Same is used to select all build key matches for each -// probe key, which are added to the resulting batch. 
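The ToCheck / ToCheckID loop described in these probe-phase comments can be pictured with the following scalar sketch (again plain Go, not the package's vectorized implementation). The probe helper and its toy inputs are made up for illustration; the build-side First/Next values are the ones the DistinctBuild walkthrough above ends up with, and a final toCheckID of 0 marks a probe tuple that found no match, consistent with that walkthrough concluding that -5 is a duplicate while -8 is new.

package main

import "fmt"

// probe is an illustrative, scalar rendering of the ToCheck / ToCheckID loop:
// every probe tuple starts at the head of the hash chain for its bucket and
// keeps following Next links while its key differs from the build key it is
// currently checked against. When the loop finishes, toCheckID[i] is the
// keyID of the matching build tuple, or 0 if the probe key never found one.
func probe(buildKeys, probeKeys []int64, first, next []uint64, hash func(int64) int) []uint64 {
	toCheckID := make([]uint64, len(probeKeys))
	toCheck := make([]int, 0, len(probeKeys))
	for i, k := range probeKeys {
		toCheckID[i] = first[hash(k)%len(first)]
		toCheck = append(toCheck, i)
	}
	for len(toCheck) > 0 {
		// differs collects the positions whose keys did not match and still
		// have chain links left to follow.
		var differs []int
		for _, i := range toCheck {
			id := toCheckID[i]
			if id == 0 {
				continue // end of chain: no match for this probe tuple
			}
			if buildKeys[id-1] == probeKeys[i] { // keyID = index + 1
				continue // match found; keep toCheckID[i] as the match
			}
			toCheckID[i] = next[id] // hash collision: advance along the chain
			differs = append(differs, i)
		}
		toCheck = differs
	}
	return toCheckID
}

func main() {
	// Build side after the walkthrough's first batch: Vals = [-6, -7, -5],
	// First = [2, 1], Next = [reserved, 3, 0, 0], same toy hash function.
	buildKeys := []int64{-6, -7, -5}
	first := []uint64{2, 1}
	next := []uint64{0, 3, 0, 0}
	h := func(k int64) int {
		if k == -7 || k == -8 {
			return 0
		}
		return 1
	}
	// Prints [0 3]: -8 has no match, -5 matches the build tuple with keyID 3.
	fmt.Println(probe(buildKeys, []int64{-8, -5}, first, next, h))
}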
Output batching is done -// to ensure that each batch is at most coldata.BatchSize(). +// 1. Compute the bucket number for each probe row's key tuple and store the +// results into the buckets array. +// 2. In order to find the position of these key tuples in the hash table: +// - First find the first element in the bucket's linked list for each key tuple +// and store it in the ToCheckID array. Initialize the ToCheck array with the +// full sequence of input indices (0...batchSize - 1). +// - While ToCheck is not empty, each element in ToCheck represents a position +// of the key tuples for which the key has not yet been visited by any prior +// probe. Perform a multi-column equality check to see if the key columns +// match that of the build table's key columns at ToCheckID. +// - Update the differs array to store whether or not the probe's key tuple +// matched the corresponding build's key tuple. +// - For the indices that did not differ, we can lazily update the HashTable's +// same linked list to store a list of all identical keys starting at head. +// Once a key has been added to ht.Same, ht.Visited is set to true. For the +// indices that have never been visited, we want to continue checking this +// bucket for identical values by adding this key to ToCheck. +// - Select the indices that differed and store them into ToCheck since they +// need to be further processed. +// - For the differing tuples, find the next ID in that bucket of the hash table +// and put it into the ToCheckID array. +// 3. Now, head stores the keyID of the first match in the build table for every +// probe table key. ht.Same is used to select all build key matches for each +// probe key, which are added to the resulting batch. Output batching is done +// to ensure that each batch is at most coldata.BatchSize(). // // In the case that an outer join on the probe table side is performed, every // single probe row is kept even if its ToCheckID is 0. If a ToCheckID of 0 is diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go index 4d75af9d1dcb..ee15822284ab 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go @@ -10899,22 +10899,22 @@ func (o *mergeJoinExceptAllOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -12492,20 +12492,24 @@ func (o *mergeJoinExceptAllOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. 
For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go index 14f30f6c8276..f26eb464e146 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go @@ -12035,22 +12035,22 @@ func (o *mergeJoinFullOuterOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -13606,20 +13606,24 @@ func (o *mergeJoinFullOuterOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go index 90016ee5ce55..a4a095e7e046 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go @@ -7695,22 +7695,22 @@ func (o *mergeJoinInnerOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -9222,20 +9222,24 @@ func (o *mergeJoinInnerOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. 
For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go index 0d486876c88f..165de1f77c3f 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go @@ -8399,22 +8399,22 @@ func (o *mergeJoinIntersectAllOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -9926,20 +9926,24 @@ func (o *mergeJoinIntersectAllOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go index 01a5339a2f90..d33e7d6fdf8c 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go @@ -9843,22 +9843,22 @@ func (o *mergeJoinLeftAntiOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. 
For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -11436,20 +11436,24 @@ func (o *mergeJoinLeftAntiOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go index 8d082d1db671..16aaaea994ae 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go @@ -9887,22 +9887,22 @@ func (o *mergeJoinLeftOuterOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -11414,20 +11414,24 @@ func (o *mergeJoinLeftOuterOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go index e0effe361bef..34d2909f91c0 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go @@ -7651,22 +7651,22 @@ func (o *mergeJoinLeftSemiOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. 
For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -9178,20 +9178,24 @@ func (o *mergeJoinLeftSemiOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go index a55f89496ed3..0b67992848b2 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go @@ -9799,22 +9799,22 @@ func (o *mergeJoinRightAntiOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -11370,20 +11370,24 @@ func (o *mergeJoinRightAntiOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. 
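Since the same buildLeftGroupsFromBatch / buildRightGroupsFromBatch comments recur in every generated merge joiner file above and below, a small hedged sketch of the two expansion orders may help. The group struct and string rows here are toys, not the package's types; the real code writes column-at-a-time into o.output.

package main

import "fmt"

type group struct {
	startIdx, endIdx, numRepeats int
}

// expandLeft repeats each row of the group numRepeats times, mirroring the
// buildLeftGroupsFromBatch comment: [a, b] with numRepeats = 3 becomes
// [a, a, a, b, b, b].
func expandLeft(rows []string, g group) []string {
	var out []string
	for i := g.startIdx; i < g.endIdx; i++ {
		for r := 0; r < g.numRepeats; r++ {
			out = append(out, rows[i])
		}
	}
	return out
}

// expandRight copies the whole group numRepeats times, mirroring the
// buildRightGroupsFromBatch comment: [a, b] with numRepeats = 3 becomes
// [a, b, a, b, a, b].
func expandRight(rows []string, g group) []string {
	var out []string
	for r := 0; r < g.numRepeats; r++ {
		out = append(out, rows[g.startIdx:g.endIdx]...)
	}
	return out
}

func main() {
	rows := []string{"1|a", "1|b"}
	g := group{startIdx: 0, endIdx: 2, numRepeats: 3}
	fmt.Println(expandLeft(rows, g))  // [1|a 1|a 1|a 1|b 1|b 1|b]
	fmt.Println(expandRight(rows, g)) // [1|a 1|b 1|a 1|b 1|a 1|b]
}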
diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go index eb691751ba6f..881d5585d5e0 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go @@ -9843,22 +9843,22 @@ func (o *mergeJoinRightOuterOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -11414,20 +11414,24 @@ func (o *mergeJoinRightOuterOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go index f818638e338e..bd507f9a24ae 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go @@ -7607,22 +7607,22 @@ func (o *mergeJoinRightSemiOp) probeBodyLSelfalseRSelfalse() { // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -9134,20 +9134,24 @@ func (o *mergeJoinRightSemiOp) buildLeftGroupsFromBatch( // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. 
// SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go b/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go index dec4680187e5..387d51a78f50 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go @@ -807,22 +807,22 @@ func _LEFT_SWITCH(_JOIN_TYPE joinTypeInfo, _HAS_SELECTION bool) { // */}} // output by repeating each row in the group numRepeats times. For example, // given an input table: // -// L1 | L2 -// -------- -// 1 | a -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | b // // and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildLeftGroupsFromBatch expands this to // -// L1 | L2 -// -------- -// 1 | a -// 1 | a -// 1 | a -// 1 | b -// 1 | b -// 1 | b +// L1 | L2 +// -------- +// 1 | a +// 1 | a +// 1 | a +// 1 | b +// 1 | b +// 1 | b // // Note: this is different from buildRightGroupsFromBatch in that each row of // group is repeated numRepeats times, instead of a simple copy of the group as @@ -968,20 +968,24 @@ func _RIGHT_SWITCH(_JOIN_TYPE joinTypeInfo, _HAS_SELECTION bool) { // */}} // buildRightGroupsFromBatch takes a []group and repeats each group numRepeats // times. For example, given an input table: -// R1 | R2 -// -------- -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// // and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}] // then buildRightGroups expands this to -// R1 | R2 -// -------- -// 1 | a -// 1 | b -// 1 | a -// 1 | b -// 1 | a -// 1 | b +// +// R1 | R2 +// -------- +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// 1 | a +// 1 | b +// // Note: this is different from buildLeftGroupsFromBatch in that each group is // not expanded but directly copied numRepeats times. // SIDE EFFECTS: writes into o.output. diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_util.go b/pkg/sql/colexec/colexecjoin/mergejoiner_util.go index 10dae4754e7f..93bd56004eb0 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_util.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_util.go @@ -82,19 +82,23 @@ func getGroupsBufferCapacity(size int) int { // Since we have a circular buffer, it is possible for groups to wrap when // capacity is reached. Consider an example when size = 3 and startIdx = 6 when // maximum number of groups is present: -// buffer = [1, 2, 3, 4, 5, x, 0] -// (integers denote different groups and 'x' stands for a garbage). +// +// buffer = [1, 2, 3, 4, 5, x, 0] +// (integers denote different groups and 'x' stands for a garbage). +// // When getGroups() is called, for ease of usage we need to return the buffer // "flattened out", and in order to reduce allocation, we actually reserve // 4*size. In the example above we will copy the buffer as: -// buffer = [1, 2, 3, 4, 5, x, 0, 1, 2, 3, 4, 5] +// +// buffer = [1, 2, 3, 4, 5, x, 0, 1, 2, 3, 4, 5] +// // and will return buffer[6:12] when getGroups is called. // The calculations for why 4*size is sufficient: -// - the largest position in which the first group can be placed is 2*size -// (capacity field enforces that) -// - the largest number of groups to copy from the "physical" start of the -// buffer is 2*size-1 -// - adding those two numbers we arrive at 4*size. +// - the largest position in which the first group can be placed is 2*size +// (capacity field enforces that) +// - the largest number of groups to copy from the "physical" start of the +// buffer is 2*size-1 +// - adding those two numbers we arrive at 4*size. 
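The 4*size reasoning above can be exercised with a small sketch; circularGroups and its fields are illustrative stand-ins, not the actual mergejoiner_util.go types, and the example values are the ones from the comment.

package main

import "fmt"

// circularGroups is a toy stand-in for the merge joiner's circular groups
// buffer: values live in data, the logical sequence starts at startIdx, has
// the given length, and may wrap around the physical end of data.
type circularGroups struct {
	data     []int
	startIdx int
	length   int
}

// getGroups returns the logical contents flattened into one contiguous slice.
// As the comment above describes, instead of rotating the buffer it copies
// the wrapped-around prefix into the spare space past the physical end (which
// is why 4*size entries are reserved up front) and returns a plain subslice.
func (c *circularGroups) getGroups() []int {
	end := c.startIdx + c.length
	if end > len(c.data) {
		// The sequence wraps: append a copy of the physical start so that
		// data[startIdx:end] becomes contiguous. With 4*size reserved, this
		// append never reallocates.
		c.data = append(c.data, c.data[:end-len(c.data)]...)
	}
	return c.data[c.startIdx:end]
}

func main() {
	const size = 3
	// Reserve 4*size entries, as getGroupsSlicesLen does, then recreate the
	// comment's example: buffer = [1 2 3 4 5 x 0] with startIdx = 6
	// (9 stands in for the garbage slot 'x').
	data := append(make([]int, 0, 4*size), 1, 2, 3, 4, 5, 9, 0)
	c := &circularGroups{data: data, startIdx: 6, length: 6}
	fmt.Println(c.getGroups()) // [0 1 2 3 4 5], i.e. buffer[6:12] from the comment
}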
func getGroupsSlicesLen(size int) int { return 4 * size } diff --git a/pkg/sql/colexec/colexectestutils/utils.go b/pkg/sql/colexec/colexectestutils/utils.go index 14270524f9e3..7df441751095 100644 --- a/pkg/sql/colexec/colexectestutils/utils.go +++ b/pkg/sql/colexec/colexectestutils/utils.go @@ -327,9 +327,9 @@ func RunTests( // RunTestsWithTyps is the same as RunTests with an ability to specify the // types of the input tuples. -// - typs is the type schema of the input tuples. Note that this is a multi- -// dimensional slice which allows for specifying different schemas for each -// of the inputs. +// - typs is the type schema of the input tuples. Note that this is a multi- +// dimensional slice which allows for specifying different schemas for each +// of the inputs. func RunTestsWithTyps( t *testing.T, allocator *colmem.Allocator, @@ -660,15 +660,15 @@ func RunTestsWithoutAllNullsInjectionWithErrorHandler( // testing facility than RunTests, because it can't get a handle on the operator // under test and therefore can't perform as many extra checks. You should // always prefer using RunTests over RunTestsWithFn. -// - tups is the sets of input tuples. -// - typs is the type schema of the input tuples. Note that this is a multi- -// dimensional slice which allows for specifying different schemas for each -// of the inputs. This can also be left nil in which case the types will be -// determined at the runtime looking at the first input tuple, and if the -// determination doesn't succeed for a value of the tuple (likely because -// it's a nil), then that column will be assumed by default of type Int64. -// - test is a function that takes a list of input Operators and performs -// testing with t. +// - tups is the sets of input tuples. +// - typs is the type schema of the input tuples. Note that this is a multi- +// dimensional slice which allows for specifying different schemas for each +// of the inputs. This can also be left nil in which case the types will be +// determined at the runtime looking at the first input tuple, and if the +// determination doesn't succeed for a value of the tuple (likely because +// it's a nil), then that column will be assumed by default of type Int64. +// - test is a function that takes a list of input Operators and performs +// testing with t. func RunTestsWithFn( t *testing.T, allocator *colmem.Allocator, @@ -833,16 +833,18 @@ func extrapolateTypesFromTuples(tups Tuples) []*types.T { // tuples of arbitrary Go types. It's meant to be used in Operator unit tests // in conjunction with OpTestOutput like the following: // -// inputTuples := tuples{ -// {1,2,3.3,true}, -// {5,6,7.0,false}, -// } +// inputTuples := tuples{ +// {1,2,3.3,true}, +// {5,6,7.0,false}, +// } +// // tupleSource := NewOpTestInput(inputTuples, types.Bool) // opUnderTest := newFooOp(tupleSource, ...) 
// output := NewOpTestOutput(opUnderTest, expectedOutputTuples) -// if err := output.Verify(); err != nil { -// t.Fatal(err) -// } +// +// if err := output.Verify(); err != nil { +// t.Fatal(err) +// } type opTestInput struct { colexecop.ZeroInputNode diff --git a/pkg/sql/colexec/colexecutils/operator.go b/pkg/sql/colexec/colexecutils/operator.go index 7a1346093def..f655b5df1326 100644 --- a/pkg/sql/colexec/colexecutils/operator.go +++ b/pkg/sql/colexec/colexecutils/operator.go @@ -101,17 +101,16 @@ func (s *fixedNumTuplesNoInputOp) Next() coldata.Batch { // // The intended diagram is as follows: // -// original input (with schema [t1, ..., tN]) -// -------------- -// | -// ↓ -// vectorTypeEnforcer (will enforce that tN+1 = outputType) -// ------------------ -// | -// ↓ -// "projecting" operator (projects its output of type outputType -// --------------------- in column at position of N+1) -// +// original input (with schema [t1, ..., tN]) +// -------------- +// | +// ↓ +// vectorTypeEnforcer (will enforce that tN+1 = outputType) +// ------------------ +// | +// ↓ +// "projecting" operator (projects its output of type outputType +// --------------------- in column at position of N+1) type vectorTypeEnforcer struct { colexecop.NonExplainable allocator *colmem.Allocator diff --git a/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go b/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go index 2575e468a4b2..504ef68271cf 100644 --- a/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go +++ b/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go @@ -742,16 +742,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -889,16 +889,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -1036,16 +1036,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -1183,16 +1183,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscDecimal{} // location of the last bound index. It is called for the first row of each // peer group. 
For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -1319,16 +1319,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscFloat64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -1471,16 +1471,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -1600,16 +1600,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -1759,16 +1759,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscTimestamp{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -1901,16 +1901,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscDatum{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2038,16 +2038,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. 
In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2158,16 +2158,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2278,16 +2278,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2398,16 +2398,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescDecimal{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2507,16 +2507,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescFloat64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2632,16 +2632,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2734,16 +2734,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2866,16 +2866,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescTimestamp{} // location of the last bound index. 
It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -2981,16 +2981,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescDatum{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -3091,16 +3091,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -3255,16 +3255,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -3419,16 +3419,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -3583,16 +3583,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscDecimal{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -3736,16 +3736,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscFloat64{} // location of the last bound index. It is called for the first row of each // peer group. 
For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -3905,16 +3905,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -4051,16 +4051,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -4227,16 +4227,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscTimestamp{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -4386,16 +4386,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscDatum{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -4540,16 +4540,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -4677,16 +4677,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. 
In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -4814,16 +4814,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -4951,16 +4951,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescDecimal{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -5077,16 +5077,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescFloat64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -5219,16 +5219,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -5338,16 +5338,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -5487,16 +5487,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescTimestamp{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -5619,16 +5619,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescDatum{} // location of the last bound index. It is called for the first row of each // peer group. 
For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -5746,16 +5746,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -5893,16 +5893,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -6040,16 +6040,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -6187,16 +6187,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscDecimal{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -6323,16 +6323,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscFloat64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -6475,16 +6475,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. 
In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -6604,16 +6604,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -6763,16 +6763,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscTimestamp{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -6905,16 +6905,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscDatum{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7042,16 +7042,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7162,16 +7162,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7282,16 +7282,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7402,16 +7402,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescDecimal{} // location of the last bound index. It is called for the first row of each // peer group. 
For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7511,16 +7511,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescFloat64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7636,16 +7636,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7738,16 +7738,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7870,16 +7870,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescTimestamp{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -7985,16 +7985,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescDatum{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -8095,16 +8095,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. 
In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -8259,16 +8259,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -8423,16 +8423,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -8587,16 +8587,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscDecimal{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -8740,16 +8740,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscFloat64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -8909,16 +8909,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -9055,16 +9055,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -9231,16 +9231,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscTimestamp{} // location of the last bound index. It is called for the first row of each // peer group. 
For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -9390,16 +9390,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscDatum{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -9544,16 +9544,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInt16{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -9681,16 +9681,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInt32{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -9818,16 +9818,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInt64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -9955,16 +9955,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescDecimal{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -10081,16 +10081,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescFloat64{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. 
In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -10223,16 +10223,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInterval{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -10342,16 +10342,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescDate{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -10491,16 +10491,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescTimestamp{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, @@ -10623,16 +10623,16 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescDatum{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, diff --git a/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go b/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go index 2a9cdd752386..84ecd483b34a 100644 --- a/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go +++ b/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go @@ -173,16 +173,16 @@ var _ rangeOffsetHandler = &_OP_STRING{} // location of the last bound index. It is called for the first row of each // peer group. For example: // -// ord col -// ------- -// 1 -// 2 -// 2 -// 3 +// ord col +// ------- +// 1 +// 2 +// 2 +// 3 // -// currRow: 1 -// lastIdx: 0 -// offset: 1 +// currRow: 1 +// lastIdx: 0 +// offset: 1 // // Assume we are calculating the end index for an ascending column. In this // case, the value at the current row is '2' and the offset is '1' unit. So, diff --git a/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go b/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go index e247eef28e03..74af323aa18d 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go @@ -26,16 +26,16 @@ import ( // 1. from the same type family are contiguous // 2. for a fixed "from" type family, all the same "from" widths are contiguous // 2'. 
for a fixed "from" type family, anyWidth "from" width is the last one -// 3. for a fixed "from" type, all the same "to" type families are contiguous -// 4. for a fixed "from" type and a fixed "to" type family, anyWidth "to" width +// 3. for a fixed "from" type, all the same "to" type families are contiguous +// 4. for a fixed "from" type and a fixed "to" type family, anyWidth "to" width // is the last one. // // If this structure is broken, then the generated code will not compile because // either -// 1. there will be duplicate entries in the switch statements (when -// "continuity" requirement is broken) -// 2. the 'default' case appears before other in the switch statements (when -// anyWidth is not the last one). +// 1. there will be duplicate entries in the switch statements (when +// "continuity" requirement is broken) +// 2. the 'default' case appears before other in the switch statements (when +// anyWidth is not the last one). var nativeCastInfos = []supportedNativeCastInfo{ {types.Bool, types.Float, boolToIntOrFloat}, {types.Bool, types.Int2, boolToIntOrFloat}, diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go index 87a16dccadc2..c0e863917420 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go @@ -43,15 +43,15 @@ import ( // // Here is the diagram of relationships for argTypeOverload struct: // -// argTypeOverloadBase overloadBase -// \ \ | -// \ ------ | -// ↓ ↓ ↓ -// argWidthOverloadBase argTypeOverload -// \ / -// \ | (single) -// ↓ ↓ -// argWidthOverload +// argTypeOverloadBase overloadBase +// \ \ | +// \ ------ | +// ↓ ↓ ↓ +// argWidthOverloadBase argTypeOverload +// \ / +// \ | (single) +// ↓ ↓ +// argWidthOverload // // lastArgTypeOverload is similar in nature to argTypeOverload in that it // describes an overloaded argument, but that argument is the last one, so the @@ -62,15 +62,15 @@ import ( // // Here is the diagram of relationships for lastArgTypeOverload struct: // -// argTypeOverloadBase overloadBase -// \ \ | -// \ ------ | -// ↓ ↓ ↓ -// argWidthOverloadBase lastArgTypeOverload -// \ / -// \ | (multiple) -// ↓ ↓ -// lastArgWidthOverload +// argTypeOverloadBase overloadBase +// \ \ | +// \ ------ | +// ↓ ↓ ↓ +// argWidthOverloadBase lastArgTypeOverload +// \ / +// \ | (multiple) +// ↓ ↓ +// lastArgWidthOverload // // Two argument overload consists of multiple corresponding to each other // argTypeOverloads and lastArgTypeOverloads. 
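The ordering rules spelled out in the cast_gen_util.go hunk above exist because the generator emits nested switch statements keyed on the "from" family and width. A minimal, self-contained sketch of that shape (toy widths and casts only, not the real generated code), assuming the anyWidth entry is kept last so that it becomes the trailing default case:

	package main

	import "fmt"

	// castIntToFloat mimics the switch shape the cast generator emits for a
	// single "from" family: one case per concrete width, with the anyWidth entry
	// last so that it becomes the default case. If a width appeared in two
	// non-contiguous runs of nativeCastInfos, the generator would emit duplicate
	// case labels here, which Go rejects at compile time.
	func castIntToFloat(fromWidth int32, v int64) float64 {
		switch fromWidth {
		case 16:
			return float64(int16(v))
		case 32:
			return float64(int32(v))
		default: // anyWidth, i.e. the 64-bit representation
			return float64(v)
		}
	}

	func main() {
		fmt.Println(castIntToFloat(16, 12)) // 12
		fmt.Println(castIntToFloat(64, 12)) // 12
	}
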
@@ -82,21 +82,21 @@ import ( // These structs (or their "resolved" equivalents) are intended to be used by // the code generation with the following patterns: // -// switch canonicalTypeFamily { -// switch width { -// -// } -// } +// switch canonicalTypeFamily { +// switch width { +// +// } +// } // -// switch leftCanonicalTypeFamily { -// switch leftWidth { -// switch rightCanonicalTypeFamily { -// switch rightWidth { -// -// } -// } -// } -// } +// switch leftCanonicalTypeFamily { +// switch leftWidth { +// switch rightCanonicalTypeFamily { +// switch rightWidth { +// +// } +// } +// } +// } type overloadBase struct { kind overloadKind diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_gen_util.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_gen_util.go index 8ba86057b752..196c8aa0805e 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_gen_util.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_gen_util.go @@ -21,12 +21,13 @@ import ( // populateTwoArgsOverloads creates all overload structs related to a single // binary, comparison, or cast operator. It takes in: -// - base - the overload base that will be shared among all new overloads. -// - opOutputTypes - mapping from a pair of types to the output type, it should -// contain an entry for all supported type pairs. -// - overrideOverloadFuncs - a function that could update AssignFunc and/or -// CompareFunc fields of a newly created lastArgWidthOverload based on a -// typeCustomizer. +// - base - the overload base that will be shared among all new overloads. +// - opOutputTypes - mapping from a pair of types to the output type, it should +// contain an entry for all supported type pairs. +// - overrideOverloadFuncs - a function that could update AssignFunc and/or +// CompareFunc fields of a newly created lastArgWidthOverload based on a +// typeCustomizer. +// // It returns all new overloads that have the same type (which will be empty // for cast operator). func populateTwoArgsOverloads( @@ -225,7 +226,9 @@ func makeFunctionRegex(funcName string, numArgs int) *regexp.Regexp { // makeTemplateFunctionCall makes a string representing a function call in the // template language. For example, it will return -// `{{.Assign "$1" "$2" "$3"}}` +// +// `{{.Assign "$1" "$2" "$3"}}` +// // if funcName is `Assign` and numArgs is 3. func makeTemplateFunctionCall(funcName string, numArgs int) string { res := "{{." + funcName diff --git a/pkg/sql/colexec/execgen/inline.go b/pkg/sql/colexec/execgen/inline.go index 2db786284b86..753d97a21a6c 100644 --- a/pkg/sql/colexec/execgen/inline.go +++ b/pkg/sql/colexec/execgen/inline.go @@ -270,8 +270,10 @@ func extractInlineFuncDecls(f *dst.File) map[string]funcInfo { // statement per return value of the input FuncDecl. For example, for // a FuncDecl that returns two boolean arguments, lastVal and lastValNull, // two statements will be returned: +// // var __retval_lastVal bool // var __retval_lastValNull bool +// // The second return is a slice of the names of each of the mangled return // declarations, in this example, __retval_lastVal and __retval_lastValNull. 
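As a rough illustration of what the inliner does with those mangled declarations (a hypothetical before/after shape, not the exact output of extractReturnValues), a call such as lastVal, lastValNull := getLast() is rewritten so that the inlined body assigns into the __retval_* variables and the caller reads them back:

	package main

	import "fmt"

	func main() {
		// Before inlining (conceptually): lastVal, lastValNull := getLast().
		// After inlining, the return values flow through the mangled variables.
		var __retval_lastVal bool
		var __retval_lastValNull bool
		{
			// Inlined body of a hypothetical getLast; its return statement has
			// been rewritten into assignments to the mangled declarations.
			__retval_lastVal = true
			__retval_lastValNull = false
		}
		lastVal, lastValNull := __retval_lastVal, __retval_lastValNull
		fmt.Println(lastVal, lastValNull) // true false
	}
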
func extractReturnValues(decl *dst.FuncDecl) (retValDeclStmt dst.Stmt, retValNames []string) { @@ -318,8 +320,10 @@ func extractReturnValues(decl *dst.FuncDecl) (retValDeclStmt dst.Stmt, retValNam // we'll return the statement: // // var ( -// a int = x -// b string = y +// +// a int = x +// b string = y +// // ) // // In the case where the formal parameter name is the same as the input diff --git a/pkg/sql/colexec/execgen/template.go b/pkg/sql/colexec/execgen/template.go index ad20d5fb7711..4e37be9d53d7 100644 --- a/pkg/sql/colexec/execgen/template.go +++ b/pkg/sql/colexec/execgen/template.go @@ -102,41 +102,45 @@ func replaceTemplateVars( // For example, given the function: // // execgen:inline // // execgen:template -// func b(t bool, i int) int { -// if t { -// x = 3 -// } else { -// x = 4 -// } -// switch i { -// case 5: fmt.Println("5") -// case 6: fmt.Println("6") -// } -// return x -// } +// +// func b(t bool, i int) int { +// if t { +// x = 3 +// } else { +// x = 4 +// } +// switch i { +// case 5: fmt.Println("5") +// case 6: fmt.Println("6") +// } +// return x +// } // // and a caller -// b(true, 5) +// +// b(true, 5) +// // this function will generate -// if true { -// x = 3 -// } else { -// x = 4 -// } -// switch 5 { -// case 5: fmt.Println("5") -// case 6: fmt.Println("6") -// } -// return x +// +// if true { +// x = 3 +// } else { +// x = 4 +// } +// switch 5 { +// case 5: fmt.Println("5") +// case 6: fmt.Println("6") +// } +// return x // // in its first pass. However, because the if's condition (true, in this case) // is a logical expression containing boolean literals, and the switch statement // is a switch on a template variable alone, a second pass "folds" // the conditionals and replaces them like so: // -// x = 3 -// fmt.Println(5) -// return x +// x = 3 +// fmt.Println(5) +// return x // // Note that this method lexically replaces all formal parameters, so together // with createTemplateFuncVariant, it enables templates to call other templates @@ -180,11 +184,12 @@ func monomorphizeTemplate(n dst.Node, info *funcInfo, args []dst.Expr) dst.Node // if { } else { } and if ! { } else { } // // execgen:switch -// switch { -// case : -// case : -// ... -// } +// +// switch { +// case : +// case : +// ... +// } func foldConditionals( n dst.Node, info *funcInfo, templateSwitches map[*dst.SwitchStmt]struct{}, ) dst.Node { @@ -696,13 +701,14 @@ func trimLeadingNewLines(decs []string) []string { // For example, given a template function: // // // execgen:template -// func foo (a int, b bool) { -// if b { -// return a -// } else { -// return a + 1 -// } -// } +// +// func foo (a int, b bool) { +// if b { +// return a +// } else { +// return a + 1 +// } +// } // // And callsites: // @@ -711,13 +717,13 @@ func trimLeadingNewLines(decs []string) []string { // // This function will add 2 new func decls to the AST: // -// func foo_true(a int) { -// return a -// } +// func foo_true(a int) { +// return a +// } // -// func foo_false(a int) { -// return a + 1 -// } +// func foo_false(a int) { +// return a + 1 +// } func replaceAndExpandTemplates(f *dst.File, templateFuncInfos map[string]*funcInfo) dst.Node { // First, create the DAG of template functions. 
This DAG points from template // function to any other template functions that are called from within its diff --git a/pkg/sql/colexec/hash_aggregator.eg.go b/pkg/sql/colexec/hash_aggregator.eg.go index 2101519d289f..e85bea30a184 100644 --- a/pkg/sql/colexec/hash_aggregator.eg.go +++ b/pkg/sql/colexec/hash_aggregator.eg.go @@ -225,10 +225,7 @@ func getNext_true(op *hashAggregator) coldata.Batch { start int = op.bufferingState.unprocessedIdx end int = op.bufferingState.pendingBatch.Length() - 1 ) - _ = sel[start] - _ = sel[end] for i := start; i <= end; i++ { - //gcassert:bce idx := sel[i] if op.distinctOutput[idx] { { @@ -260,7 +257,6 @@ func getNext_true(op *hashAggregator) coldata.Batch { _ = op.distinctOutput[start] _ = op.distinctOutput[end] for i := start; i <= end; i++ { - //gcassert:bce if op.distinctOutput[i] { { __retval_0 = true @@ -359,8 +355,6 @@ func getNext_true(op *hashAggregator) coldata.Batch { start int = toBuffer - 1 end int = lowerBound ) - _ = sel[start] - _ = sel[end] for i := start; i >= end; i-- { //gcassert:bce idx := sel[i] diff --git a/pkg/sql/colexec/hash_aggregator.go b/pkg/sql/colexec/hash_aggregator.go index c13432c0448c..aba13d831daa 100644 --- a/pkg/sql/colexec/hash_aggregator.go +++ b/pkg/sql/colexec/hash_aggregator.go @@ -271,62 +271,68 @@ func (op *hashAggregator) setupScratchSlices(numBuffered int) { // // Let's go through an example of how this function works: our input stream // contains the following tuples: -// {-3}, {-3}, {-2}, {-1}, {-4}, {-1}, {-1}, {-4}. +// +// {-3}, {-3}, {-2}, {-1}, {-4}, {-1}, {-1}, {-4}. +// // (Note that negative values are chosen in order to visually distinguish them // from the IDs that we'll be working with below.) // We will use coldata.BatchSize() == 4 and let's assume that we will use a // simple hash function h(i) = i % 2 with two buckets in the hash table. // // I. we get a batch [-3, -3, -2, -1]. -// 1. a) compute hash buckets: ProbeScratch.Next = [reserved, 1, 1, 0, 1] -// b) build 'Next' chains between hash buckets: -// ProbeScratch.First = [3, 1] (length of First == # of hash buckets) -// ProbeScratch.Next = [reserved, 2, 4, 0, 0] -// (Note that we have a hash collision in the bucket with hash 1.) -// c) find "equality" buckets (populate HeadID): -// ProbeScratch.HeadID = [1, 1, 3, 4] -// (This means that tuples at position 0 and 1 are the same, and the -// tuple at position HeadID-1 is the head of the equality chain.) -// 2. divide all tuples into the equality chains based on HeadID: -// eqChains[0] = [0, 1] -// eqChains[1] = [2] -// eqChains[2] = [3] -// The special "heads of equality chains" selection vector is [0, 2, 3]. -// 3. we don't have any existing buckets yet, so this step is a noop. -// 4. each of the three equality chains contains tuples from a separate -// aggregation group, so we perform aggregation on each of them in turn. -// After we do so, we will have three buckets and the hash table will contain -// three tuples (with buckets and tuples corresponding to each other): +// 1. a) compute hash buckets: ProbeScratch.Next = [reserved, 1, 1, 0, 1] +// b) build 'Next' chains between hash buckets: +// ProbeScratch.First = [3, 1] (length of First == # of hash buckets) +// ProbeScratch.Next = [reserved, 2, 4, 0, 0] +// (Note that we have a hash collision in the bucket with hash 1.) 
+// c) find "equality" buckets (populate HeadID): +// ProbeScratch.HeadID = [1, 1, 3, 4] +// (This means that tuples at position 0 and 1 are the same, and the +// tuple at position HeadID-1 is the head of the equality chain.) +// 2. divide all tuples into the equality chains based on HeadID: +// eqChains[0] = [0, 1] +// eqChains[1] = [2] +// eqChains[2] = [3] +// The special "heads of equality chains" selection vector is [0, 2, 3]. +// 3. we don't have any existing buckets yet, so this step is a noop. +// 4. each of the three equality chains contains tuples from a separate +// aggregation group, so we perform aggregation on each of them in turn. +// After we do so, we will have three buckets and the hash table will contain +// three tuples (with buckets and tuples corresponding to each other): // buckets = [, , ] // ht.Vals = [-3, -2, -1]. -// We have fully processed the first batch. +// We have fully processed the first batch. // // II. we get a batch [-4, -1, -1, -4]. -// 1. a) compute hash buckets: ProbeScratch.Next = [reserved, 0, 1, 1, 0] -// b) build 'next' chains between hash buckets: -// ProbeScratch.First = [1, 2] -// ProbeScratch.Next = [reserved, 4, 3, 0, 0] -// c) find "equality" buckets: -// ProbeScratch.HeadID = [1, 2, 2, 1] -// 2. divide all tuples into the equality chains based on HeadID: -// eqChains[0] = [0, 3] -// eqChains[1] = [1, 2] -// The special "heads of equality chains" selection vector is [0, 1]. -// 3. probe that special "heads" selection vector against the tuples already -// present in the hash table: -// ProbeScratch.HeadID = [0, 3] -// Value 0 indicates that the first equality chain doesn't have an -// existing bucket, but the second chain does and the ID of its bucket is -// HeadID-1 = 2. We aggregate the second equality chain into that bucket. -// 4. the first equality chain contains tuples from a new aggregation group, -// so we create a new bucket for it and perform the aggregation. -// After we do so, we will have four buckets and the hash table will contain -// four tuples: +// +// 1. a) compute hash buckets: ProbeScratch.Next = [reserved, 0, 1, 1, 0] +// b) build 'next' chains between hash buckets: +// ProbeScratch.First = [1, 2] +// ProbeScratch.Next = [reserved, 4, 3, 0, 0] +// c) find "equality" buckets: +// ProbeScratch.HeadID = [1, 2, 2, 1] +// +// 2. divide all tuples into the equality chains based on HeadID: +// eqChains[0] = [0, 3] +// eqChains[1] = [1, 2] +// The special "heads of equality chains" selection vector is [0, 1]. +// +// 3. probe that special "heads" selection vector against the tuples already +// present in the hash table: +// ProbeScratch.HeadID = [0, 3] +// Value 0 indicates that the first equality chain doesn't have an +// existing bucket, but the second chain does and the ID of its bucket is +// HeadID-1 = 2. We aggregate the second equality chain into that bucket. +// +// 4. the first equality chain contains tuples from a new aggregation group, +// so we create a new bucket for it and perform the aggregation. +// After we do so, we will have four buckets and the hash table will contain +// four tuples: // buckets = [, , , ] // ht.Vals = [-3, -2, -1, -4]. -// We have fully processed the second batch. +// We have fully processed the second batch. // -// We have processed the input fully, so we're ready to emit the output. +// We have processed the input fully, so we're ready to emit the output. // // NOTE: b *must* be a non-zero length batch. 
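To make step I.2 of the walkthrough above concrete, here is a toy, self-contained version of dividing the first batch [-3, -3, -2, -1] into equality chains. The real operator builds the chains via ProbeScratch.First/Next/HeadID rather than a map, but the resulting chains and the "heads" selection vector are the same:

	package main

	import "fmt"

	func main() {
		batch := []int{-3, -3, -2, -1}
		var eqChains [][]int
		chainForValue := map[int]int{} // tuple value -> index into eqChains
		for i, v := range batch {
			if c, ok := chainForValue[v]; ok {
				eqChains[c] = append(eqChains[c], i)
				continue
			}
			chainForValue[v] = len(eqChains)
			eqChains = append(eqChains, []int{i})
		}
		fmt.Println(eqChains) // [[0 1] [2] [3]]: eqChains[0], eqChains[1], eqChains[2] above

		heads := make([]int, len(eqChains))
		for c, chain := range eqChains {
			heads[c] = chain[0]
		}
		fmt.Println(heads) // [0 2 3]: the "heads of equality chains" selection vector
	}
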
func (op *hashAggregator) onlineAgg(b coldata.Batch) { diff --git a/pkg/sql/colexec/hash_aggregator_tmpl.go b/pkg/sql/colexec/hash_aggregator_tmpl.go index 62ac93b21da1..2eab2090bbec 100644 --- a/pkg/sql/colexec/hash_aggregator_tmpl.go +++ b/pkg/sql/colexec/hash_aggregator_tmpl.go @@ -128,23 +128,18 @@ func (op *hashAggregator) populateEqChains( func findSplit( op *hashAggregator, start int, end int, sel []int, useSel bool, ascending bool, ) (bool, int) { - if useSel { - _ = sel[start] - _ = sel[end] - } else { + if !useSel { _ = op.distinctOutput[start] _ = op.distinctOutput[end] } if ascending { for i := start; i <= end; i++ { if useSel { - //gcassert:bce idx := sel[i] if op.distinctOutput[idx] { return true, i } } else { - //gcassert:bce if op.distinctOutput[i] { return true, i } diff --git a/pkg/sql/colexec/sort.eg.go b/pkg/sql/colexec/sort.eg.go index 03c72b141671..1e3262f08230 100644 --- a/pkg/sql/colexec/sort.eg.go +++ b/pkg/sql/colexec/sort.eg.go @@ -339,6 +339,7 @@ func (s *sortBoolAscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBoolAscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -432,6 +433,7 @@ func (s *sortBytesAscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBytesAscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -518,6 +520,7 @@ func (s *sortDecimalAscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDecimalAscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -595,6 +598,7 @@ func (s *sortInt16AscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortInt16AscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -683,6 +687,7 @@ func (s *sortInt32AscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortInt32AscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -771,6 +776,7 @@ func (s *sortInt64AscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortInt64AscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -859,6 +865,7 @@ func (s *sortFloat64AscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortFloat64AscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -955,6 +962,7 @@ func (s *sortTimestampAscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortTimestampAscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1039,6 +1047,7 @@ func (s *sortIntervalAscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortIntervalAscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() 
&& s.nulls.NullAt(s.order[j]) @@ -1116,6 +1125,7 @@ func (s *sortJSONAscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortJSONAscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1199,6 +1209,7 @@ func (s *sortDatumAscWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDatumAscWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1278,6 +1289,7 @@ func (s *sortBoolDescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBoolDescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1371,6 +1383,7 @@ func (s *sortBytesDescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBytesDescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1457,6 +1470,7 @@ func (s *sortDecimalDescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDecimalDescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1534,6 +1548,7 @@ func (s *sortInt16DescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortInt16DescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1622,6 +1637,7 @@ func (s *sortInt32DescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortInt32DescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1710,6 +1726,7 @@ func (s *sortInt64DescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortInt64DescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1798,6 +1815,7 @@ func (s *sortFloat64DescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortFloat64DescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1894,6 +1912,7 @@ func (s *sortTimestampDescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortTimestampDescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -1978,6 +1997,7 @@ func (s *sortIntervalDescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortIntervalDescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -2055,6 +2075,7 @@ func (s *sortJSONDescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortJSONDescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ 
-2138,6 +2159,7 @@ func (s *sortDatumDescWithNullsOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDatumDescWithNullsOp) Less(i, j int) bool { n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) n2 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[j]) @@ -2217,6 +2239,7 @@ func (s *sortBoolAscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBoolAscOp) Less(i, j int) bool { var lt bool @@ -2300,6 +2323,7 @@ func (s *sortBytesAscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBytesAscOp) Less(i, j int) bool { // If the type can be abbreviated as a uint64, compare the abbreviated @@ -2376,6 +2400,7 @@ func (s *sortDecimalAscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDecimalAscOp) Less(i, j int) bool { var lt bool @@ -2444,7 +2469,6 @@ func (s *sortInt16AscOp) sortPartitions(partitions []int) { } //gcassert:inline -// func (s *sortInt16AscOp) Less(i, j int) bool { var lt bool @@ -2524,7 +2548,6 @@ func (s *sortInt32AscOp) sortPartitions(partitions []int) { } //gcassert:inline -// func (s *sortInt32AscOp) Less(i, j int) bool { var lt bool @@ -2604,7 +2627,6 @@ func (s *sortInt64AscOp) sortPartitions(partitions []int) { } //gcassert:inline -// func (s *sortInt64AscOp) Less(i, j int) bool { var lt bool @@ -2683,6 +2705,7 @@ func (s *sortFloat64AscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortFloat64AscOp) Less(i, j int) bool { var lt bool @@ -2769,6 +2792,7 @@ func (s *sortTimestampAscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortTimestampAscOp) Less(i, j int) bool { var lt bool @@ -2843,6 +2867,7 @@ func (s *sortIntervalAscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortIntervalAscOp) Less(i, j int) bool { var lt bool @@ -2910,6 +2935,7 @@ func (s *sortJSONAscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortJSONAscOp) Less(i, j int) bool { var lt bool @@ -2983,6 +3009,7 @@ func (s *sortDatumAscOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDatumAscOp) Less(i, j int) bool { var lt bool @@ -3052,6 +3079,7 @@ func (s *sortBoolDescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBoolDescOp) Less(i, j int) bool { var lt bool @@ -3135,6 +3163,7 @@ func (s *sortBytesDescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortBytesDescOp) Less(i, j int) bool { // If the type can be abbreviated as a uint64, compare the abbreviated @@ -3211,6 +3240,7 @@ func (s *sortDecimalDescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDecimalDescOp) Less(i, j int) bool { var lt bool @@ -3279,7 +3309,6 @@ func (s *sortInt16DescOp) sortPartitions(partitions []int) { } //gcassert:inline -// func (s *sortInt16DescOp) Less(i, j int) bool { var lt bool @@ -3359,7 +3388,6 @@ func (s *sortInt32DescOp) sortPartitions(partitions []int) { } //gcassert:inline -// func (s *sortInt32DescOp) Less(i, j int) bool { var lt bool @@ -3439,7 +3467,6 @@ func (s *sortInt64DescOp) sortPartitions(partitions []int) { } //gcassert:inline -// func (s *sortInt64DescOp) Less(i, j int) bool { var lt bool @@ -3518,6 +3545,7 @@ func (s *sortFloat64DescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortFloat64DescOp) Less(i, j int) bool { var lt bool @@ -3604,6 +3632,7 @@ func (s *sortTimestampDescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortTimestampDescOp) Less(i, j 
int) bool { var lt bool @@ -3678,6 +3707,7 @@ func (s *sortIntervalDescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortIntervalDescOp) Less(i, j int) bool { var lt bool @@ -3745,6 +3775,7 @@ func (s *sortJSONDescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortJSONDescOp) Less(i, j int) bool { var lt bool @@ -3818,6 +3849,7 @@ func (s *sortDatumDescOp) sortPartitions(partitions []int) { } } +//gcassert:inline func (s *sortDatumDescOp) Less(i, j int) bool { var lt bool diff --git a/pkg/sql/colexec/sort_tmpl.go b/pkg/sql/colexec/sort_tmpl.go index cc08865eb54f..fd75a7a281f3 100644 --- a/pkg/sql/colexec/sort_tmpl.go +++ b/pkg/sql/colexec/sort_tmpl.go @@ -166,8 +166,9 @@ func (s *sort_TYPE_DIR_HANDLES_NULLSOp) sortPartitions(partitions []int) { // {{$isInt := or (eq .VecMethod "Int16") (eq .VecMethod "Int32")}} // {{$isInt = or ($isInt) (eq .VecMethod "Int64")}} // {{if and ($isInt) (not $nulls)}} -//gcassert:inline // {{end}} +// +//gcassert:inline func (s *sort_TYPE_DIR_HANDLES_NULLSOp) Less(i, j int) bool { // {{if $nulls}} n1 := s.nulls.MaybeHasNulls() && s.nulls.NullAt(s.order[i]) diff --git a/pkg/sql/colexecop/constants.go b/pkg/sql/colexecop/constants.go index 54c29ebfe992..ceda4e524235 100644 --- a/pkg/sql/colexecop/constants.go +++ b/pkg/sql/colexecop/constants.go @@ -28,15 +28,15 @@ const SortMergeNonSortMinFDsOpen = 2 // fallback to sort and merge join. We'll be using the minimum necessary per // input + 2 (1 for each spilling queue that the merge joiner uses). For // clarity this is what happens: -// - The 2 partitions that need to be sorted + merged will use an FD each: 2 -// FDs. Meanwhile, each sorter will use up to ExternalSorterMinPartitions to -// sort and partition this input. At this stage 2 + 2 * -// ExternalSorterMinPartitions FDs are used. -// - Once the inputs (the hash joiner partitions) are finished, both FDs will -// be released. The merge joiner will now be in use, which uses two -// spillingQueues with 1 FD each for a total of 2. Since each sorter will -// use ExternalSorterMinPartitions, the FDs used at this stage are 2 + -// (2 * ExternalSorterMinPartitions) as well. Note that as soon as the -// sorter emits its first batch, it must be the case that the input to it -// has returned a zero batch, and thus the FD has been closed. +// - The 2 partitions that need to be sorted + merged will use an FD each: 2 +// FDs. Meanwhile, each sorter will use up to ExternalSorterMinPartitions to +// sort and partition this input. At this stage 2 + 2 * +// ExternalSorterMinPartitions FDs are used. +// - Once the inputs (the hash joiner partitions) are finished, both FDs will +// be released. The merge joiner will now be in use, which uses two +// spillingQueues with 1 FD each for a total of 2. Since each sorter will +// use ExternalSorterMinPartitions, the FDs used at this stage are 2 + +// (2 * ExternalSorterMinPartitions) as well. Note that as soon as the +// sorter emits its first batch, it must be the case that the input to it +// has returned a zero batch, and thus the FD has been closed. const ExternalHJMinPartitions = SortMergeNonSortMinFDsOpen + (ExternalSorterMinPartitions * 2) diff --git a/pkg/sql/colfetcher/cfetcher.go b/pkg/sql/colfetcher/cfetcher.go index 80ab036c0086..68e5336ff77c 100644 --- a/pkg/sql/colfetcher/cfetcher.go +++ b/pkg/sql/colfetcher/cfetcher.go @@ -182,21 +182,22 @@ const noOutputColumn = -1 // cFetcher handles fetching kvs and forming table rows for an // arbitrary number of tables. 
// Usage: -// var cf cFetcher -// err := cf.Init(..) -// // Handle err -// err := cf.StartScan(..) -// // Handle err -// for { -// res, err := cf.NextBatch() -// // Handle err -// if res.colBatch.Length() == 0 { -// // Done -// break -// } -// // Process res.colBatch -// } -// cf.Close(ctx) +// +// var cf cFetcher +// err := cf.Init(..) +// // Handle err +// err := cf.StartScan(..) +// // Handle err +// for { +// res, err := cf.NextBatch() +// // Handle err +// if res.colBatch.Length() == 0 { +// // Done +// break +// } +// // Process res.colBatch +// } +// cf.Close(ctx) type cFetcher struct { cFetcherArgs diff --git a/pkg/sql/colflow/colrpc/inbox_test.go b/pkg/sql/colflow/colrpc/inbox_test.go index a1f294720e17..62682deab9a6 100644 --- a/pkg/sql/colflow/colrpc/inbox_test.go +++ b/pkg/sql/colflow/colrpc/inbox_test.go @@ -201,9 +201,9 @@ func TestInboxTimeout(t *testing.T) { // These goroutines race against each other and the // desired state is that everything is cleaned up at the end. Examples of // scenarios that are tested by this test include but are not limited to: -// - DrainMeta called before Next and before a stream arrives. -// - DrainMeta called with an active stream. -// - A forceful cancellation of Next but no call to DrainMeta. +// - DrainMeta called before Next and before a stream arrives. +// - DrainMeta called with an active stream. +// - A forceful cancellation of Next but no call to DrainMeta. func TestInboxShutdown(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/colflow/colrpc/outbox.go b/pkg/sql/colflow/colrpc/outbox.go index e62450e2ec4a..d8dab29483d5 100644 --- a/pkg/sql/colflow/colrpc/outbox.go +++ b/pkg/sql/colflow/colrpc/outbox.go @@ -80,8 +80,8 @@ type Outbox struct { } // NewOutbox creates a new Outbox. -// - getStats, when non-nil, returns all of the execution statistics of the -// operators that are in the same tree as this Outbox. +// - getStats, when non-nil, returns all of the execution statistics of the +// operators that are in the same tree as this Outbox. func NewOutbox( unlimitedAllocator *colmem.Allocator, input colexecargs.OpWithMetaInfo, @@ -135,15 +135,15 @@ func (o *Outbox) close(ctx context.Context) { // If an error is encountered that cannot be sent over the stream, the error // will be logged but not returned. // There are several ways the bidirectional FlowStream RPC may terminate. -// 1) Execution is finished. In this case, the upstream operator signals -// termination by returning a zero-length batch. The Outbox will drain its -// metadata sources, send the metadata, and then call CloseSend on the -// stream. The Outbox will wait until its Recv goroutine receives a non-nil -// error to not leak resources. -// 2) A cancellation happened. This can come from the provided context or the -// remote reader. Refer to tests for expected behavior. -// 3) A drain signal was received from the server (consumer). In this case, the -// Outbox goes through the same steps as 1). +// 1. Execution is finished. In this case, the upstream operator signals +// termination by returning a zero-length batch. The Outbox will drain its +// metadata sources, send the metadata, and then call CloseSend on the +// stream. The Outbox will wait until its Recv goroutine receives a non-nil +// error to not leak resources. +// 2. A cancellation happened. This can come from the provided context or the +// remote reader. Refer to tests for expected behavior. +// 3. A drain signal was received from the server (consumer). 
In this case, the +// Outbox goes through the same steps as 1). func (o *Outbox) Run( ctx context.Context, dialer execinfra.Dialer, @@ -254,20 +254,20 @@ func (o *Outbox) moveToDraining(ctx context.Context, reason redact.RedactableStr // drain signal) as well as an error which is non-nil if an error was // encountered AND the error should be sent over the stream as metadata. The for // loop continues iterating until one of the following conditions becomes true: -// 1) A zero-length batch is received from the input. This indicates graceful -// termination. true, nil is returned. -// 2) Outbox.draining is observed to be true. This is also considered graceful -// termination. true, nil is returned. -// 3) An error unrelated to the stream occurs (e.g. while deserializing a -// coldata.Batch). false, err is returned. This err should be sent over the -// stream as metadata. -// 4) An error related to the stream occurs. In this case, the error is logged -// but not returned, as there is no way to propagate this error anywhere -// meaningful. false, nil is returned. -// NOTE: if non-io.EOF error is encountered (indicating ungraceful shutdown -// of the stream), flowCtxCancel will be called. If an io.EOF is encountered -// (indicating a graceful shutdown initiated by the remote Inbox), -// outboxCtxCancel will be called. +// 1. A zero-length batch is received from the input. This indicates graceful +// termination. true, nil is returned. +// 2. Outbox.draining is observed to be true. This is also considered graceful +// termination. true, nil is returned. +// 3. An error unrelated to the stream occurs (e.g. while deserializing a +// coldata.Batch). false, err is returned. This err should be sent over the +// stream as metadata. +// 4. An error related to the stream occurs. In this case, the error is logged +// but not returned, as there is no way to propagate this error anywhere +// meaningful. false, nil is returned. +// NOTE: if non-io.EOF error is encountered (indicating ungraceful shutdown +// of the stream), flowCtxCancel will be called. If an io.EOF is encountered +// (indicating a graceful shutdown initiated by the remote Inbox), +// outboxCtxCancel will be called. func (o *Outbox) sendBatches( ctx context.Context, stream flowStreamClient, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) (terminatedGracefully bool, errToSend error) { diff --git a/pkg/sql/colflow/routers.go b/pkg/sql/colflow/routers.go index c8cc2d5a68b1..610bc76ab4d5 100644 --- a/pkg/sql/colflow/routers.go +++ b/pkg/sql/colflow/routers.go @@ -336,10 +336,11 @@ func (o *routerOutputOp) forwardErr(err error) { // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling -// performance becomes a concern. The main router goroutine will be writing to -// disk as the code is written, meaning that we impact the performance of -// writing rows to a fast output if we have to write to disk for a single -// slow output. +// +// performance becomes a concern. The main router goroutine will be writing to +// disk as the code is written, meaning that we impact the performance of +// writing rows to a fast output if we have to write to disk for a single +// slow output. 
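Both this addBatch contract and the Outbox loop described a few hunks above rely on the same zero-length-batch convention: a producer pushes batches until its input is exhausted and then pushes a single zero-length batch to signal that no more batches will be added. A minimal sketch of that protocol with toy types (not the real coldata.Batch or routerOutputOp API):

	package main

	import "fmt"

	// batch is a toy stand-in for coldata.Batch; only its length matters here.
	type batch struct{ length int }

	// drainInto pushes every batch from source into add and finishes with the
	// zero-length batch that tells the consumer no more batches will arrive.
	func drainInto(source []batch, add func(batch)) {
		for _, b := range source {
			add(b)
		}
		add(batch{length: 0}) // termination signal
	}

	func main() {
		drainInto([]batch{{length: 3}, {length: 2}}, func(b batch) {
			fmt.Println("added batch of length", b.length)
		})
	}
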
func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() diff --git a/pkg/sql/colflow/routers_test.go b/pkg/sql/colflow/routers_test.go index c35cfad051e7..a0fb35ac9c72 100644 --- a/pkg/sql/colflow/routers_test.go +++ b/pkg/sql/colflow/routers_test.go @@ -112,7 +112,8 @@ type memoryTestCase struct { // Note that not all tests will check for a spill, it is enough that some // deterministic tests do so for the simple cases. // TODO(asubiotto): We might want to also return a verify() function that will -// check for leftover files. +// +// check for leftover files. func getDiskQueueCfgAndMemoryTestCases( t *testing.T, rng *rand.Rand, ) (colcontainer.DiskQueueCfg, func(), []memoryTestCase) { diff --git a/pkg/sql/colflow/vectorized_flow_shutdown_test.go b/pkg/sql/colflow/vectorized_flow_shutdown_test.go index 6463ffc15ecf..38236cc0de3c 100644 --- a/pkg/sql/colflow/vectorized_flow_shutdown_test.go +++ b/pkg/sql/colflow/vectorized_flow_shutdown_test.go @@ -77,37 +77,41 @@ func (c callbackCloser) Close(ctx context.Context) error { // synchronizer which then outputs all the data into a materializer. // The resulting scheme looks as follows: // -// Remote Node | Local Node -// | -// -> output -> Outbox -> | -> Inbox -> | -// | | +// Remote Node | Local Node +// | +// -> output -> Outbox -> | -> Inbox -> | +// | | +// // Hash Router -> output -> Outbox -> | -> Inbox -> | -// | | -// -> output -> Outbox -> | -> Inbox -> | -// | -> Synchronizer -> materializer -> FlowCoordinator -// Outbox -> | -> Inbox -> | -// | -// Outbox -> | -> Inbox -> | -// | -// Outbox -> | -> Inbox -> | +// +// | | +// -> output -> Outbox -> | -> Inbox -> | +// | -> Synchronizer -> materializer -> FlowCoordinator +// Outbox -> | -> Inbox -> | +// | +// Outbox -> | -> Inbox -> | +// | +// Outbox -> | -> Inbox -> | // // Also, with 50% probability, another remote node with the chain of an Outbox // and Inbox is placed between the synchronizer and materializer. The resulting // scheme then looks as follows: // -// Remote Node | Another Remote Node | Local Node -// | | -// -> output -> Outbox -> | -> Inbox -> | -// | | | | +// Remote Node | Another Remote Node | Local Node +// | | +// -> output -> Outbox -> | -> Inbox -> | +// | | | | +// // Hash Router -> output -> Outbox -> | -> Inbox -> | -// | | | | -// -> output -> Outbox -> | -> Inbox -> | -// | | -> Synchronizer -> Outbox -> | -> Inbox -> materializer -> FlowCoordinator -// Outbox -> | -> Inbox -> | -// | | | -// Outbox -> | -> Inbox -> | -// | | | -// Outbox -> | -> Inbox -> | +// +// | | | | +// -> output -> Outbox -> | -> Inbox -> | +// | | -> Synchronizer -> Outbox -> | -> Inbox -> materializer -> FlowCoordinator +// Outbox -> | -> Inbox -> | +// | | | +// Outbox -> | -> Inbox -> | +// | | | +// Outbox -> | -> Inbox -> | // // Remote nodes are simulated by having separate contexts and separate outbox // registries. diff --git a/pkg/sql/colflow/vectorized_flow_test.go b/pkg/sql/colflow/vectorized_flow_test.go index 5ad903f50d4e..85d710aee968 100644 --- a/pkg/sql/colflow/vectorized_flow_test.go +++ b/pkg/sql/colflow/vectorized_flow_test.go @@ -86,34 +86,39 @@ func intCols(numCols int) []*types.T { // not important). If it drains the depicted inbox, that is pulling from node 2 // which is in turn pulling from an outbox, a cycle is created and the flow is // blocked. 
-// +------------+ -// | Node 3 | -// +-----+------+ -// ^ -// Node 1 | Node 2 +// +// +------------+ +// | Node 3 | +// +-----+------+ +// ^ +// Node 1 | Node 2 +// // +------------------------+-----------------+ -// +------------+ | -// Spec C +--------+ | | -// | | noop | | | -// | +---+----+ | | -// | ^ | | -// | +--+---+ | | -// | |outbox| +<----------+ -// | +------+ | | | -// +------------+ | | +// +// +------------+ | +// Spec C +--------+ | | +// | | noop | | | +// | +---+----+ | | +// | ^ | | +// | +--+---+ | | +// | |outbox| +<----------+ +// | +------+ | | | +// +------------+ | | +// // Drain cycle!---+ | +----+-----------------+ -// v | |Any group of operators| -// +------------+ | +----+-----------------+ -// | +------+ | | ^ -// Spec A |inbox +--------------+ -// | +------+ | | -// +------------+ | -// ^ | -// | | -// +-----+------+ | -// Spec B noop | | -// |materializer| + -// +------------+ +// +// v | |Any group of operators| +// +------------+ | +----+-----------------+ +// | +------+ | | ^ +// Spec A |inbox +--------------+ +// | +------+ | | +// +------------+ | +// ^ | +// | | +// +-----+------+ | +// Spec B noop | | +// |materializer| + +// +------------+ func TestDrainOnlyInputDAG(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/colmem/allocator.go b/pkg/sql/colmem/allocator.go index 8c1d3acf4e82..a1d92a2c7864 100644 --- a/pkg/sql/colmem/allocator.go +++ b/pkg/sql/colmem/allocator.go @@ -426,10 +426,11 @@ func (a *Allocator) Used() int64 { // this allocator by delta bytes (which can be both positive or negative). // // If: -// - afterAllocation is true, -// - the allocator was created via NewLimitedAllocator with a non-nil unlimited -// memory account, -// - the positive delta allocation is denied by the limited memory account, +// - afterAllocation is true, +// - the allocator was created via NewLimitedAllocator with a non-nil unlimited +// memory account, +// - the positive delta allocation is denied by the limited memory account, +// // then the unlimited account is grown by delta. The memory error is still // thrown. func (a *Allocator) adjustMemoryUsage(delta int64, afterAllocation bool) { @@ -604,16 +605,16 @@ func GetFixedSizeTypeSize(t *types.T) (size int64) { // reallocating batches with ResetMaybeReallocate() function. // // The heuristic is as follows: -// - the first time a batch exceeds the memory limit, its capacity is memorized, -// and from now on that capacity will determine the upper bound on the -// capacities of the batches allocated through the helper; -// - if at any point in time a batch exceeds the memory limit by at least a -// factor of two, then that batch is discarded, and the capacity will never -// exceed half of the capacity of the discarded batch; -// - if the memory limit is not reached, then the behavior of the dynamic growth -// of the capacity provided by Allocator.resetMaybeReallocate is still -// applicable (i.e. the capacities will grow exponentially until -// coldata.BatchSize()). 
+// - the first time a batch exceeds the memory limit, its capacity is memorized, +// and from now on that capacity will determine the upper bound on the +// capacities of the batches allocated through the helper; +// - if at any point in time a batch exceeds the memory limit by at least a +// factor of two, then that batch is discarded, and the capacity will never +// exceed half of the capacity of the discarded batch; +// - if the memory limit is not reached, then the behavior of the dynamic growth +// of the capacity provided by Allocator.resetMaybeReallocate is still +// applicable (i.e. the capacities will grow exponentially until +// coldata.BatchSize()). // // NOTE: it works under the assumption that only a single coldata.Batch is being // used. diff --git a/pkg/sql/comment_on_database.go b/pkg/sql/comment_on_database.go index 07bf47f0d303..355e5edccc60 100644 --- a/pkg/sql/comment_on_database.go +++ b/pkg/sql/comment_on_database.go @@ -30,7 +30,8 @@ type commentOnDatabaseNode struct { // CommentOnDatabase add comment on a database. // Privileges: CREATE on database. -// notes: postgres requires CREATE on the database. +// +// notes: postgres requires CREATE on the database. func (p *planner) CommentOnDatabase( ctx context.Context, n *tree.CommentOnDatabase, ) (planNode, error) { diff --git a/pkg/sql/comment_on_schema.go b/pkg/sql/comment_on_schema.go index a303bfd236f7..d97919042cc6 100644 --- a/pkg/sql/comment_on_schema.go +++ b/pkg/sql/comment_on_schema.go @@ -32,7 +32,8 @@ type commentOnSchemaNode struct { // CommentOnSchema add comment on a schema. // Privileges: CREATE on scheme. -// notes: postgres requires CREATE on the scheme. +// +// notes: postgres requires CREATE on the scheme. func (p *planner) CommentOnSchema(ctx context.Context, n *tree.CommentOnSchema) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/comment_on_table.go b/pkg/sql/comment_on_table.go index 23a944657d57..60097a985ff6 100644 --- a/pkg/sql/comment_on_table.go +++ b/pkg/sql/comment_on_table.go @@ -30,8 +30,9 @@ type commentOnTableNode struct { // CommentOnTable add comment on a table. // Privileges: CREATE on table. -// notes: postgres requires CREATE on the table. -// mysql requires ALTER, CREATE, INSERT on the table. +// +// notes: postgres requires CREATE on the table. +// mysql requires ALTER, CREATE, INSERT on the table. func (p *planner) CommentOnTable(ctx context.Context, n *tree.CommentOnTable) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/conn_executor_exec.go b/pkg/sql/conn_executor_exec.go index 6f118a2527e3..04bb1ffd82bb 100644 --- a/pkg/sql/conn_executor_exec.go +++ b/pkg/sql/conn_executor_exec.go @@ -82,7 +82,8 @@ import ( // stmt: The statement to execute. // res: Used to produce query results. // pinfo: The values to use for the statement's placeholders. If nil is passed, -// then the statement cannot have any placeholder. +// +// then the statement cannot have any placeholder. func (ex *connExecutor) execStmt( ctx context.Context, parserStmt parser.Statement, @@ -1681,9 +1682,9 @@ func (ex *connExecutor) beginImplicitTxn( // execStmtInAbortedState executes a statement in a txn that's in state // Aborted or RestartWait. All statements result in error events except: -// - COMMIT / ROLLBACK: aborts the current transaction. -// - ROLLBACK TO SAVEPOINT / SAVEPOINT: reopens the current transaction, -// allowing it to be retried. +// - COMMIT / ROLLBACK: aborts the current transaction. 
+// - ROLLBACK TO SAVEPOINT / SAVEPOINT: reopens the current transaction, +// allowing it to be retried. func (ex *connExecutor) execStmtInAbortedState( ctx context.Context, ast tree.Statement, res RestrictedCommandResult, ) (_ fsm.Event, payload fsm.EventPayload) { diff --git a/pkg/sql/conn_executor_test.go b/pkg/sql/conn_executor_test.go index cfda981129d8..51b3de842290 100644 --- a/pkg/sql/conn_executor_test.go +++ b/pkg/sql/conn_executor_test.go @@ -1113,10 +1113,10 @@ func TestShowLastQueryStatisticsUnknown(t *testing.T) { } // TestTransactionDeadline tests that the transaction deadline is set correctly: -// - In a single-tenant environment, the transaction deadline should use the leased -// descriptor expiration. -// - In a multi-tenant environment, the transaction deadline should be set to -// min(sqlliveness.Session expiry, lease descriptor expiration). +// - In a single-tenant environment, the transaction deadline should use the leased +// descriptor expiration. +// - In a multi-tenant environment, the transaction deadline should be set to +// min(sqlliveness.Session expiry, lease descriptor expiration). func TestTransactionDeadline(t *testing.T) { defer leaktest.AfterTest(t)() @@ -1689,8 +1689,8 @@ func TestEmptyTxnIsBeingCorrectlyCounted(t *testing.T) { "after executing empty transactions, but it was not") } -//TestSessionTotalActiveTime tests that a session's total active time is -//correctly being recorded as transactions are executed. +// TestSessionTotalActiveTime tests that a session's total active time is +// correctly being recorded as transactions are executed. func TestSessionTotalActiveTime(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/contention/contentionutils/concurrent_buffer_guard.go b/pkg/sql/contention/contentionutils/concurrent_buffer_guard.go index e0fd71ce70d5..98949660ff06 100644 --- a/pkg/sql/contention/contentionutils/concurrent_buffer_guard.go +++ b/pkg/sql/contention/contentionutils/concurrent_buffer_guard.go @@ -29,13 +29,16 @@ type CapacityLimiter func() int64 // as a generic data structure as something like: // // template -// class ConcurrentBuffer { -// std::vector buffer; -// ... +// +// class ConcurrentBuffer { +// std::vector buffer; +// ... +// // public: -// void write(T val); -// std::vector read() const; -// }; +// +// void write(T val); +// std::vector read() const; +// }; // // To work around the lacking of generic, ConcurrentBufferGuard is designed to // be embedded into higher-level structs that implements the buffer read/write @@ -85,15 +88,15 @@ func NewConcurrentBufferGuard( // If the reserved index is valid, AtomicWrite immediately executes the // bufferWriteOp with the reserved index. However, if the reserved index is not // valid, (that is, array index out of bound), there are two scenarios: -// 1. If the reserved index == size of the array, then the caller of AtomicWrite() -// method is responsible for executing the onBufferFullHandler() callback. The -// caller does so by upgrading the read-lock to a write-lock, therefore -// blocks all future writers. After the callback is executed, the write-lock -// is then downgraded to a read-lock. -// 2. If the reserved index > size of the array, then the caller of AtomicWrite() -// is blocked until the array is flushed. This is achieved by waiting on the -// conditional variable (flushDone) while holding onto the read-lock. After -// the flush is completed, the writer is unblocked and allowed to retry. +// 1. 
If the reserved index == size of the array, then the caller of AtomicWrite() +// method is responsible for executing the onBufferFullHandler() callback. The +// caller does so by upgrading the read-lock to a write-lock, therefore +// blocks all future writers. After the callback is executed, the write-lock +// is then downgraded to a read-lock. +// 2. If the reserved index > size of the array, then the caller of AtomicWrite() +// is blocked until the array is flushed. This is achieved by waiting on the +// conditional variable (flushDone) while holding onto the read-lock. After +// the flush is completed, the writer is unblocked and allowed to retry. func (c *ConcurrentBufferGuard) AtomicWrite(op bufferWriteOp) { size := c.limiter() c.flushSyncLock.RLock() diff --git a/pkg/sql/contention/event_store.go b/pkg/sql/contention/event_store.go index a5baf2d7bc55..58238cdb7ed7 100644 --- a/pkg/sql/contention/event_store.go +++ b/pkg/sql/contention/event_store.go @@ -72,18 +72,18 @@ var eventBatchPool = &sync.Pool{ // event collection. It subsequently resolves the transaction ID reported in the // contention event into transaction fingerprint ID. // eventStore relies on two background goroutines: -// 1. intake goroutine: this goroutine is responsible for inserting batched -// contention events into the in-memory store, and then queue the batched -// events into the resolver. This means that the contention events can be -// immediately visible as early as possible to the readers of the eventStore -// before the txn id resolution is performed. -// 2. resolver goroutine: this goroutine runs on a timer (controlled via -// sql.contention.event_store.resolution_interval cluster setting). -// Periodically, the timer fires and resolver attempts to contact remote -// nodes to resolve the transaction IDs in the queued contention events -// into transaction fingerprint IDs. If the attempt is successful, the -// resolver goroutine will update the stored contention events with the -// transaction fingerprint IDs. +// 1. intake goroutine: this goroutine is responsible for inserting batched +// contention events into the in-memory store, and then queue the batched +// events into the resolver. This means that the contention events can be +// immediately visible as early as possible to the readers of the eventStore +// before the txn id resolution is performed. +// 2. resolver goroutine: this goroutine runs on a timer (controlled via +// sql.contention.event_store.resolution_interval cluster setting). +// Periodically, the timer fires and resolver attempts to contact remote +// nodes to resolve the transaction IDs in the queued contention events +// into transaction fingerprint IDs. If the attempt is successful, the +// resolver goroutine will update the stored contention events with the +// transaction fingerprint IDs. type eventStore struct { st *cluster.Settings @@ -296,13 +296,13 @@ func (s *eventStore) getEventByEventHash( // flushAndResolve is the main method called by the resolver goroutine each // time the timer fires. This method does two things: -// 1. it triggers the batching buffer to flush its content into the intake -// goroutine. This is to ensure that in the case where we have very low -// rate of contentions, the contention events won't be permanently trapped -// in the batching buffer. -// 2. it invokes the dequeue() method on the resolverQueue. This cause the -// resolver to perform txnID resolution. See inline comments on the method -// for details. +// 1. 
it triggers the batching buffer to flush its content into the intake +// goroutine. This is to ensure that in the case where we have very low +// rate of contentions, the contention events won't be permanently trapped +// in the batching buffer. +// 2. it invokes the dequeue() method on the resolverQueue. This cause the +// resolver to perform txnID resolution. See inline comments on the method +// for details. func (s *eventStore) flushAndResolve(ctx context.Context) error { // This forces the write-buffer flushes its batch into the intake goroutine. // The intake goroutine will asynchronously add all events in the batch diff --git a/pkg/sql/contention/registry_test.go b/pkg/sql/contention/registry_test.go index d17531dd5544..c89a1402fdc2 100644 --- a/pkg/sql/contention/registry_test.go +++ b/pkg/sql/contention/registry_test.go @@ -195,9 +195,9 @@ func TestRegistryConcurrentAdds(t *testing.T) { // TestSerializedRegistryInvariants verifies that the serialized registries // maintain all invariants, namely that -// - all three levels of objects are subject to the respective maximum size -// - all three levels of objects satisfy the respective ordering -// requirements. +// - all three levels of objects are subject to the respective maximum size +// - all three levels of objects satisfy the respective ordering +// requirements. func TestSerializedRegistryInvariants(t *testing.T) { rng, _ := randutil.NewTestRand() const nonSQLKeyProbability = 0.1 diff --git a/pkg/sql/contention/txnidcache/txn_id_cache.go b/pkg/sql/contention/txnidcache/txn_id_cache.go index 63c1a56727b4..eeeea37dcbbf 100644 --- a/pkg/sql/contention/txnidcache/txn_id_cache.go +++ b/pkg/sql/contention/txnidcache/txn_id_cache.go @@ -55,51 +55,52 @@ const channelSize = 128 // reaches the limit defined by the cluster setting. // // Cache's overall architecture is as follows: -// +------------------------------------------------------------+ -// | connExecutor --------* | -// | | writes resolvedTxnID to Writer | -// | v | -// | +---------------------------------------------------+ | -// | | Writer | | -// | | | | -// | | Writer contains multiple shards of concurrent | | -// | | write buffer. Each incoming resolvedTxnID is | | -// | | first hashed to a corresponding shard, and then | | -// | | is written to the concurrent write buffer | | -// | | backing that shard. Once the concurrent write | | -// | | buffer is full, a flush is performed and the | | -// | | content of the buffer is send into the channel. | | -// | | | | -// | | +------------+ | | -// | | | shard1 | | | -// | | +------------+ | | -// | | | shard2 | | | -// | | +------------+ | | -// | | | shard3 | | | -// | | +------------+ | | -// | | | ..... | | | -// | | | ..... | | | -// | | +------------+ | | -// | | | shard128 | | | -// | | +------------+ | | -// | | | | -// | +-----+---------------------------------------------+ | -// +------------|-----------------------------------------------+ -// | -// | -// V -// channel -// ^ -// | -// Cache polls the channel using a goroutine and push the -// | block into its storage. -// | -// +----------------------------------+ -// | Cache: | -// | The cache contains a | -// | FIFO buffer backed by | -// | fifoCache. 
| -// +----------------------------------+ +// +// +------------------------------------------------------------+ +// | connExecutor --------* | +// | | writes resolvedTxnID to Writer | +// | v | +// | +---------------------------------------------------+ | +// | | Writer | | +// | | | | +// | | Writer contains multiple shards of concurrent | | +// | | write buffer. Each incoming resolvedTxnID is | | +// | | first hashed to a corresponding shard, and then | | +// | | is written to the concurrent write buffer | | +// | | backing that shard. Once the concurrent write | | +// | | buffer is full, a flush is performed and the | | +// | | content of the buffer is send into the channel. | | +// | | | | +// | | +------------+ | | +// | | | shard1 | | | +// | | +------------+ | | +// | | | shard2 | | | +// | | +------------+ | | +// | | | shard3 | | | +// | | +------------+ | | +// | | | ..... | | | +// | | | ..... | | | +// | | +------------+ | | +// | | | shard128 | | | +// | | +------------+ | | +// | | | | +// | +-----+---------------------------------------------+ | +// +------------|-----------------------------------------------+ +// | +// | +// V +// channel +// ^ +// | +// Cache polls the channel using a goroutine and push the +// | block into its storage. +// | +// +----------------------------------+ +// | Cache: | +// | The cache contains a | +// | FIFO buffer backed by | +// | fifoCache. | +// +----------------------------------+ type Cache struct { st *cluster.Settings diff --git a/pkg/sql/covering/overlap_merge.go b/pkg/sql/covering/overlap_merge.go index 798d528b9db0..33b4d32436b7 100644 --- a/pkg/sql/covering/overlap_merge.go +++ b/pkg/sql/covering/overlap_merge.go @@ -73,9 +73,10 @@ type Covering []Range // returned as a `[]interface{}` and in the same order as they are in coverings. // // Example: -// covering 1: [1, 2) -> 'a', [3, 4) -> 'b', [6, 7) -> 'c' -// covering 2: [1, 5) -> 'd' -// output: [1, 2) -> 'ad', [2, 3) -> `d`, [3, 4) -> 'bd', [4, 5) -> 'd', [6, 7) -> 'c' +// +// covering 1: [1, 2) -> 'a', [3, 4) -> 'b', [6, 7) -> 'c' +// covering 2: [1, 5) -> 'd' +// output: [1, 2) -> 'ad', [2, 3) -> `d`, [3, 4) -> 'bd', [4, 5) -> 'd', [6, 7) -> 'c' // // The input is mutated (sorted). It is also assumed (and not checked) to be // valid (e.g. non-overlapping intervals in each covering). diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index 6119e9f4fefe..1543de5bcb58 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -88,10 +88,10 @@ import ( const CrdbInternalName = catconstants.CRDBInternalSchemaName // Naming convention: -// - if the response is served from memory, prefix with node_ -// - if the response is served via a kv request, prefix with kv_ -// - if the response is not from kv requests but is cluster-wide (i.e. the -// answer isn't specific to the sql connection being used, prefix with cluster_. +// - if the response is served from memory, prefix with node_ +// - if the response is served via a kv request, prefix with kv_ +// - if the response is not from kv requests but is cluster-wide (i.e. the +// answer isn't specific to the sql connection being used, prefix with cluster_. // // Adding something new here will require an update to `pkg/cli` for inclusion in // a `debug zip`; the unit tests will guide you. 
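// A small sketch, under assumptions, of the sharded write-buffer pattern in
// the txnidcache.Cache diagram above: each write hashes to a shard, a full
// shard is flushed as one block into a channel, and a single consumer
// goroutine stores the blocks. The record type, shard count, and capacity are
// made up; the real Writer sits behind a ConcurrentBufferGuard and feeds a
// fifoCache.
package sketch

import "sync"

type record struct {
	txnID         uint64
	fingerprintID uint64
}

const (
	numShards     = 8
	shardCapacity = 64
)

type shardedWriter struct {
	sink   chan []record // polled by the cache goroutine
	shards [numShards]struct {
		sync.Mutex
		buf []record
	}
}

func newShardedWriter() *shardedWriter {
	return &shardedWriter{sink: make(chan []record, 16)}
}

// Record buffers one entry; a simple modulo stands in for the real hash.
func (w *shardedWriter) Record(r record) {
	shard := &w.shards[r.txnID%numShards]
	var block []record
	shard.Lock()
	shard.buf = append(shard.buf, r)
	if len(shard.buf) >= shardCapacity {
		block, shard.buf = shard.buf, nil
	}
	shard.Unlock()
	if block != nil {
		w.sink <- block // full buffer: hand the whole block to the channel
	}
}

// consume is the cache-side goroutine: it polls the channel and pushes each
// block into whatever storage backs the cache (FIFO-evicted in the real code).
func consume(sink <-chan []record, store func([]record)) {
	for block := range sink {
		store(block)
	}
}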
diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index 3208886a1118..5b819b253701 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -739,15 +739,17 @@ func TestDistSQLFlowsVirtualTables(t *testing.T) { // Traces on node1: // ------------- // root <-- traceID1 -// root.child <-- traceID1 -// root.child.detached_child <-- traceID1 +// +// root.child <-- traceID1 +// root.child.detached_child <-- traceID1 // // Traces on node2: // ------------- // root.child.remotechild <-- traceID1 // root.child.remotechilddone <-- traceID1 // root2 <-- traceID2 -// root2.child <-- traceID2 +// +// root2.child <-- traceID2 func setupTraces(t1, t2 *tracing.Tracer) (tracingpb.TraceID, func()) { // Start a root span on "node 1". root := t1.StartSpan("root", tracing.WithRecording(tracingpb.RecordingVerbose)) diff --git a/pkg/sql/create_index.go b/pkg/sql/create_index.go index 47fa6ef7abc4..fec085511cfe 100644 --- a/pkg/sql/create_index.go +++ b/pkg/sql/create_index.go @@ -48,8 +48,9 @@ type createIndexNode struct { // CreateIndex creates an index. // Privileges: CREATE on table. -// notes: postgres requires CREATE on the table. -// mysql requires INDEX on the table. +// +// notes: postgres requires CREATE on the table. +// mysql requires INDEX on the table. func (p *planner) CreateIndex(ctx context.Context, n *tree.CreateIndex) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/create_role.go b/pkg/sql/create_role.go index d7227b301b64..19f8e95e2357 100644 --- a/pkg/sql/create_role.go +++ b/pkg/sql/create_role.go @@ -44,8 +44,9 @@ type CreateRoleNode struct { // CreateRole represents a CREATE ROLE statement. // Privileges: INSERT on system.users. -// notes: postgres allows the creation of users with an empty password. We do -// as well, but disallow password authentication for these users. +// +// notes: postgres allows the creation of users with an empty password. We do +// as well, but disallow password authentication for these users. func (p *planner) CreateRole(ctx context.Context, n *tree.CreateRole) (planNode, error) { return p.CreateRoleNode(ctx, n.Name, n.IfNotExists, n.IsRole, "CREATE ROLE", n.KVOptions) diff --git a/pkg/sql/delegate/show_grants.go b/pkg/sql/delegate/show_grants.go index e22c4b16af89..4c7a72d78bdc 100644 --- a/pkg/sql/delegate/show_grants.go +++ b/pkg/sql/delegate/show_grants.go @@ -27,8 +27,9 @@ import ( // delegateShowGrants implements SHOW GRANTS which returns grant details for the // specified objects and users. // Privileges: None. -// Notes: postgres does not have a SHOW GRANTS statement. -// mysql only returns the user's privileges. +// +// Notes: postgres does not have a SHOW GRANTS statement. +// mysql only returns the user's privileges. 
func (d *delegator) delegateShowGrants(n *tree.ShowGrants) (tree.Statement, error) { var params []string diff --git a/pkg/sql/delegate/show_ranges.go b/pkg/sql/delegate/show_ranges.go index 9de81960b662..33ffed220bc6 100644 --- a/pkg/sql/delegate/show_ranges.go +++ b/pkg/sql/delegate/show_ranges.go @@ -44,9 +44,10 @@ func checkPrivilegesForShowRanges(d *delegator, table cat.Table) error { } // delegateShowRanges implements the SHOW RANGES statement: -// SHOW RANGES FROM TABLE t -// SHOW RANGES FROM INDEX t@idx -// SHOW RANGES FROM DATABASE db +// +// SHOW RANGES FROM TABLE t +// SHOW RANGES FROM INDEX t@idx +// SHOW RANGES FROM DATABASE db // // These statements show the ranges corresponding to the given table or index, // along with the list of replicas and the lease holder. diff --git a/pkg/sql/delegate/show_role_grants.go b/pkg/sql/delegate/show_role_grants.go index ebf6332e7072..b9eec553791d 100644 --- a/pkg/sql/delegate/show_role_grants.go +++ b/pkg/sql/delegate/show_role_grants.go @@ -23,7 +23,8 @@ import ( // ShowRoleGrants returns role membership details for the specified roles and grantees. // Privileges: SELECT on system.role_members. -// Notes: postgres does not have a SHOW GRANTS ON ROLES statement. +// +// Notes: postgres does not have a SHOW GRANTS ON ROLES statement. func (d *delegator) delegateShowRoleGrants(n *tree.ShowRoleGrants) (tree.Statement, error) { const selectQuery = ` SELECT role AS role_name, diff --git a/pkg/sql/delegate/show_sequences.go b/pkg/sql/delegate/show_sequences.go index 90d3aa963d19..924892a7649f 100644 --- a/pkg/sql/delegate/show_sequences.go +++ b/pkg/sql/delegate/show_sequences.go @@ -19,7 +19,8 @@ import ( // ShowSequences returns all the schemas in the given or current database. // Privileges: None. -// Notes: postgres does not have a SHOW SEQUENCES statement. +// +// Notes: postgres does not have a SHOW SEQUENCES statement. func (d *delegator) delegateShowSequences(n *tree.ShowSequences) (tree.Statement, error) { name, err := d.getSpecifiedOrCurrentDatabase(n.Database) if err != nil { diff --git a/pkg/sql/delegate/show_table.go b/pkg/sql/delegate/show_table.go index 37af5be9af2d..a305b8f3c6af 100644 --- a/pkg/sql/delegate/show_table.go +++ b/pkg/sql/delegate/show_table.go @@ -247,12 +247,13 @@ func (d *delegator) delegateShowCreateAllTables() (tree.Statement, error) { // showTableDetails returns the AST of a query which extracts information about // the given table using the given query patterns in SQL. The query pattern must // accept the following formatting parameters: -// %[1]s the database name as SQL string literal. -// %[2]s the unqualified table name as SQL string literal. -// %[3]s the given table name as SQL string literal. -// %[4]s the database name as SQL identifier. -// %[5]s the schema name as SQL string literal. -// %[6]s the table ID. +// +// %[1]s the database name as SQL string literal. +// %[2]s the unqualified table name as SQL string literal. +// %[3]s the given table name as SQL string literal. +// %[4]s the database name as SQL identifier. +// %[5]s the schema name as SQL string literal. +// %[6]s the table ID. 
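// The %[1]s through %[6]s placeholders listed above are Go's indexed
// formatting verbs. A toy illustration of how such a pattern gets
// instantiated; the pattern and the concrete values below are made up, not one
// of the real SHOW query templates.
package sketch

import "fmt"

func formatDetailsQuery() string {
	const pattern = "db=%[1]s table=%[2]s given=%[3]s AS %[4]s schema=%[5]s id=%[6]s"
	return fmt.Sprintf(pattern,
		"'test'",              // %[1]s database name as SQL string literal
		"'users'",             // %[2]s unqualified table name as SQL string literal
		"'test.public.users'", // %[3]s the given table name as SQL string literal
		"test",                // %[4]s database name as SQL identifier
		"'public'",            // %[5]s schema name as SQL string literal
		"53",                  // %[6]s table ID
	)
}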
func (d *delegator) showTableDetails( name *tree.UnresolvedObjectName, query string, ) (tree.Statement, error) { diff --git a/pkg/sql/delegate/show_tables.go b/pkg/sql/delegate/show_tables.go index 48e8cb46f4da..b9c29f0e7c6f 100644 --- a/pkg/sql/delegate/show_tables.go +++ b/pkg/sql/delegate/show_tables.go @@ -30,8 +30,9 @@ var showEstimatedRowCountClusterSetting = settings.RegisterBoolSetting( // delegateShowTables implements SHOW TABLES which returns all the tables. // Privileges: None. -// Notes: postgres does not have a SHOW TABLES statement. -// mysql only returns tables you have privileges on. +// +// Notes: postgres does not have a SHOW TABLES statement. +// mysql only returns tables you have privileges on. func (d *delegator) delegateShowTables(n *tree.ShowTables) (tree.Statement, error) { flags := cat.Flags{AvoidDescriptorCaches: true} _, name, err := d.catalog.ResolveSchema(d.ctx, flags, &n.ObjectNamePrefix) diff --git a/pkg/sql/distsql/server.go b/pkg/sql/distsql/server.go index f70fff3fd4f7..4e374b72ca0e 100644 --- a/pkg/sql/distsql/server.go +++ b/pkg/sql/distsql/server.go @@ -215,10 +215,13 @@ func FlowVerIsCompatible( // // Args: // reserved: Specifies the upfront memory reservation that the flow takes -// ownership of. This account is already closed if an error is returned or -// will be closed through Flow.Cleanup. +// +// ownership of. This account is already closed if an error is returned or +// will be closed through Flow.Cleanup. +// // localState: Specifies if the flow runs entirely on this node and, if it does, -// specifies the txn and other attributes. +// +// specifies the txn and other attributes. // // Note: unless an error is returned, the returned context contains a span that // must be finished through Flow.Cleanup. diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index 360878306d93..db9090831190 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -63,22 +63,22 @@ import ( // DistSQLPlanner is used to generate distributed plans from logical // plans. A rough overview of the process: // -// - the plan is based on a planNode tree (in the future it will be based on an -// intermediate representation tree). Only a subset of the possible trees is -// supported (this can be checked via CheckSupport). +// - the plan is based on a planNode tree (in the future it will be based on an +// intermediate representation tree). Only a subset of the possible trees is +// supported (this can be checked via CheckSupport). // -// - we generate a PhysicalPlan for the planNode tree recursively. The -// PhysicalPlan consists of a network of processors and streams, with a set -// of unconnected "result routers". The PhysicalPlan also has information on -// ordering and on the mapping planNode columns to columns in the result -// streams (all result routers output streams with the same schema). +// - we generate a PhysicalPlan for the planNode tree recursively. The +// PhysicalPlan consists of a network of processors and streams, with a set +// of unconnected "result routers". The PhysicalPlan also has information on +// ordering and on the mapping planNode columns to columns in the result +// streams (all result routers output streams with the same schema). // -// The PhysicalPlan for a scanNode leaf consists of TableReaders, one for each node -// that has one or more ranges. +// The PhysicalPlan for a scanNode leaf consists of TableReaders, one for each node +// that has one or more ranges. 
// -// - for each an internal planNode we start with the plan of the child node(s) -// and add processing stages (connected to the result routers of the children -// node). +// - for each an internal planNode we start with the plan of the child node(s) +// and add processing stages (connected to the result routers of the children +// node). type DistSQLPlanner struct { // planVersion is the version of DistSQL targeted by the plan we're building. // This is currently only assigned to the node's current DistSQL version and @@ -1104,13 +1104,13 @@ func (dsp *DistSQLPlanner) PartitionSpans( // partitionSpans takes a single span and splits it up according to the owning // nodes (if the span touches multiple ranges). // -// - partitions is the set of SpanPartitions so far. The updated set is -// returned. -// - nodeMap maps a SQLInstanceID to an index inside the partitions array. If -// the SQL instance chosen for the span is not in this map, then a new -// SpanPartition is appended to partitions and nodeMap is updated accordingly. -// - getSQLInstanceIDForKVNodeID is a resolver from the KV node ID to the SQL -// instance ID. +// - partitions is the set of SpanPartitions so far. The updated set is +// returned. +// - nodeMap maps a SQLInstanceID to an index inside the partitions array. If +// the SQL instance chosen for the span is not in this map, then a new +// SpanPartition is appended to partitions and nodeMap is updated accordingly. +// - getSQLInstanceIDForKVNodeID is a resolver from the KV node ID to the SQL +// instance ID. // // The updated array of SpanPartitions is returned as well as the index into // that array pointing to the SpanPartition that included the last part of the @@ -1502,9 +1502,10 @@ func (dsp *DistSQLPlanner) convertOrdering( // initTableReaderSpecTemplate initializes a TableReaderSpec/PostProcessSpec // that corresponds to a scanNode, except for the following fields: -// - Spans -// - Parallelize -// - BatchBytesLimit +// - Spans +// - Parallelize +// - BatchBytesLimit +// // The generated specs will be used as templates for planning potentially // multiple TableReaders. func initTableReaderSpecTemplate( @@ -1909,20 +1910,20 @@ func (dsp *DistSQLPlanner) addAggregators( // planAggregators plans the aggregator processors. An evaluator stage is added // if necessary. // Invariants assumed: -// - There is strictly no "pre-evaluation" necessary. If the given query is -// 'SELECT COUNT(k), v + w FROM kv GROUP BY v + w', the evaluation of the first -// 'v + w' is done at the source of the groupNode. -// - We only operate on the following expressions: -// - ONLY aggregation functions, with arguments pre-evaluated. So for -// COUNT(k + v), we assume a stream of evaluated 'k + v' values. -// - Expressions that CONTAIN an aggregation function, e.g. 'COUNT(k) + 1'. -// These are set as render expressions in the post-processing spec and -// are evaluated on the rows that the aggregator returns. -// - Expressions that also appear verbatim in the GROUP BY expressions. -// For 'SELECT k GROUP BY k', the aggregation function added is IDENT, -// therefore k just passes through unchanged. -// All other expressions simply pass through unchanged, for e.g. '1' in -// 'SELECT 1 GROUP BY k'. +// - There is strictly no "pre-evaluation" necessary. If the given query is +// 'SELECT COUNT(k), v + w FROM kv GROUP BY v + w', the evaluation of the first +// 'v + w' is done at the source of the groupNode. 
+// - We only operate on the following expressions: +// - ONLY aggregation functions, with arguments pre-evaluated. So for +// COUNT(k + v), we assume a stream of evaluated 'k + v' values. +// - Expressions that CONTAIN an aggregation function, e.g. 'COUNT(k) + 1'. +// These are set as render expressions in the post-processing spec and +// are evaluated on the rows that the aggregator returns. +// - Expressions that also appear verbatim in the GROUP BY expressions. +// For 'SELECT k GROUP BY k', the aggregation function added is IDENT, +// therefore k just passes through unchanged. +// All other expressions simply pass through unchanged, for e.g. '1' in +// 'SELECT 1 GROUP BY k'. func (dsp *DistSQLPlanner) planAggregators( planCtx *PlanningCtx, p *PhysicalPlan, info *aggregatorPlanningInfo, ) error { @@ -3671,31 +3672,32 @@ func (dsp *DistSQLPlanner) isOnlyOnGateway(plan *PhysicalPlan) bool { // unnecessary network I/O. // // Examples (single node): -// - Query: ( VALUES (1), (2), (2) ) UNION ( VALUES (2), (3) ) -// Plan: -// VALUES VALUES +// +// - Query: ( VALUES (1), (2), (2) ) UNION ( VALUES (2), (3) ) +// Plan: +// VALUES VALUES // | | -// ------------- -// | -// DISTINCT +// ------------- +// | +// DISTINCT // -// - Query: ( VALUES (1), (2), (2) ) INTERSECT ALL ( VALUES (2), (3) ) -// Plan: -// VALUES VALUES +// - Query: ( VALUES (1), (2), (2) ) INTERSECT ALL ( VALUES (2), (3) ) +// Plan: +// VALUES VALUES // | | -// ------------- -// | -// JOIN +// ------------- +// | +// JOIN // -// - Query: ( VALUES (1), (2), (2) ) EXCEPT ( VALUES (2), (3) ) -// Plan: -// VALUES VALUES +// - Query: ( VALUES (1), (2), (2) ) EXCEPT ( VALUES (2), (3) ) +// Plan: +// VALUES VALUES // | | -// DISTINCT DISTINCT +// DISTINCT DISTINCT // | | -// ------------- -// | -// JOIN +// ------------- +// | +// JOIN func (dsp *DistSQLPlanner) createPlanForSetOp( ctx context.Context, planCtx *PlanningCtx, n *unionNode, ) (*PhysicalPlan, error) { diff --git a/pkg/sql/distsql_running.go b/pkg/sql/distsql_running.go index bb2e5b6af96e..2760c256ab51 100644 --- a/pkg/sql/distsql_running.go +++ b/pkg/sql/distsql_running.go @@ -1353,10 +1353,10 @@ func (dsp *DistSQLPlanner) PlanAndRunAll( // function will have closed all the subquery plans because it assumes that the // caller will not try to run the main plan given that the subqueries' // evaluation failed. -// - subqueryResultMemAcc must be a non-nil memory account that the result of -// subqueries' evaluation will be registered with. It is the caller's -// responsibility to shrink (or close) the account accordingly, once the -// references to those results are lost. +// - subqueryResultMemAcc must be a non-nil memory account that the result of +// subqueries' evaluation will be registered with. It is the caller's +// responsibility to shrink (or close) the account accordingly, once the +// references to those results are lost. func (dsp *DistSQLPlanner) PlanAndRunSubqueries( ctx context.Context, planner *planner, diff --git a/pkg/sql/doc.go b/pkg/sql/doc.go index 74a3223a6036..ca2fd63f8d14 100644 --- a/pkg/sql/doc.go +++ b/pkg/sql/doc.go @@ -17,7 +17,7 @@ map. The sql package builds on top of this core system (provided by the storage and kv packages) adding parsing, query planning and query execution as well as defining the privilege model. -Databases and Tables +# Databases and Tables The two primary objects are databases and tables. A database is a namespace which holds a series of tables. 
Conceptually, a database can be viewed as a @@ -28,25 +28,25 @@ secondary indexes. Like a directory, a database has a name and some metadata. The metadata is defined by the DatabaseDescriptor: - message DatabaseDescriptor { - optional string name; - optional uint32 id; - optional PrivilegeDescriptor privileges; - } + message DatabaseDescriptor { + optional string name; + optional uint32 id; + optional PrivilegeDescriptor privileges; + } As you can see, currently the metadata we store for databases just consists of privileges. Similarly, tables have a TableDescriptor: - message TableDescriptor { - optional string name; - optional uint32 id; - repeated ColumnDescriptor columns; - optional IndexDescriptor primary_index; - repeated IndexDescriptor indexes; - optional PrivilegeDescriptor privileges; - } + message TableDescriptor { + optional string name; + optional uint32 id; + repeated ColumnDescriptor columns; + optional IndexDescriptor primary_index; + repeated IndexDescriptor indexes; + optional PrivilegeDescriptor privileges; + } Both the database ID and the table ID are allocated from the same "ID space" and IDs are never reused. @@ -56,56 +56,55 @@ root level contains databases and the database level contains tables. The "system.namespace" and "system.descriptor" tables implement the mapping from database/table name to ID and from ID to descriptor: - CREATE TABLE system.namespace ( - "parentID" INT, - "name" CHAR, - "id" INT, - PRIMARY KEY ("parentID", name) - ); + CREATE TABLE system.namespace ( + "parentID" INT, + "name" CHAR, + "id" INT, + PRIMARY KEY ("parentID", name) + ); - Create TABLE system.descriptor ( - "id" INT PRIMARY KEY, - "descriptor" BLOB - ); + Create TABLE system.descriptor ( + "id" INT PRIMARY KEY, + "descriptor" BLOB + ); The ID 0 is a reserved ID used for the "root" of the namespace in which the databases reside. In order to look up the ID of a database given its name, the system runs the underlying key-value operations that correspond to the following query: - SELECT id FROM system.namespace WHERE "parentID" = 0 AND name = + SELECT id FROM system.namespace WHERE "parentID" = 0 AND name = And given a database/table ID, the system looks up the descriptor using the following query: - SELECT descriptor FROM system.descriptor WHERE id = - + SELECT descriptor FROM system.descriptor WHERE id = Let's also create two new tables to use as running examples, one relatively simple, and one a little more complex. The first table is just a list of stores, with a "store_id" primary key that is an automatically incremented unique integer as the primary key (the "SERIAL" datatype) and a name. - CREATE DATABASE test; - SET DATABASE TO test; + CREATE DATABASE test; + SET DATABASE TO test; - Create TABLE stores ( - "store_id" SERIAL PRIMARY KEY, - "name" CHAR UNIQUE - ); + Create TABLE stores ( + "store_id" SERIAL PRIMARY KEY, + "name" CHAR UNIQUE + ); The second table - CREATE TABLE inventory ( - "item_id" INT UNIQUE, - "name" CHAR UNIQUE, - "at_store" INT, - "stock" INT, - PRIMARY KEY (item_id, at_store), - CONSTRAINT at_store_fk FOREIGN KEY (at_store) REFERENCES stores (store_id) - ); + CREATE TABLE inventory ( + "item_id" INT UNIQUE, + "name" CHAR UNIQUE, + "at_store" INT, + "stock" INT, + PRIMARY KEY (item_id, at_store), + CONSTRAINT at_store_fk FOREIGN KEY (at_store) REFERENCES stores (store_id) + ); -Primary Key Addressing +# Primary Key Addressing All of the SQL data stored in tables is mapped down to individual keys and values. 
We call the exact mapping converting any table or row to a key value @@ -119,7 +118,7 @@ Primary keys consist of one or more non-NULL columns from the table. For a given row of the table, the columns for the primary key are encoded into a single string. For example, our inventory table would be encoded as: - /item_id/at_store + /item_id/at_store [Note that "/" is being used to disambiguate the components of the key. The actual encodings do not use the "/" character. The actual encoding is specified @@ -132,17 +131,17 @@ Before being stored in the monolithic key-value space, the encoded primary key columns are prefixed with the table ID and an ID indicating that the key corresponds to the primary index. The prefix for the inventory table looks like this: - /TableID/PrimaryIndexID/item_id/at_store + /TableID/PrimaryIndexID/item_id/at_store Each column value is stored in a key with that prefix. Every column has a unique ID (local to the table). The value for every cell is stored at the key: - /TableID/PrimaryIndexID/item_id/at_store/ColumnID -> ColumnValue + /TableID/PrimaryIndexID/item_id/at_store/ColumnID -> ColumnValue Thus, the scan over the range - [/TableID/PrimaryIndexID/item_id/at_store, - /TableID/PrimaryIndexID/item_id/at_storf) + [/TableID/PrimaryIndexID/item_id/at_store, + /TableID/PrimaryIndexID/item_id/at_storf) Where the abuse of notation "namf" in the end key refers to the key resulting from incrementing the value of the start key. As an efficiency, we do not store @@ -153,12 +152,12 @@ to note the existence of a row with only a primary key and remaining NULLs, every row also has a sentinel key indicating its existence. The sentinel key is simply the primary index key, with an empty value: - /TableID/PrimaryIndexID/item_id/at_store -> + /TableID/PrimaryIndexID/item_id/at_store -> Thus the above scan on such a row would return a single key, which we can use to reconstruct the row filling in NULLs for the non-primary-key values. -Column Families +# Column Families The above structure is inefficient if we have many columns, since each row in an N-column table results in up to N+1 entries (1 sentinel key + N keys if every @@ -167,7 +166,7 @@ together and write them as a single key-value pair. We call this a "column family", and there are more details in this blog post: https://www.cockroachlabs.com/blog/sql-cockroachdb-column-families/ -Secondary Indexes +# Secondary Indexes Despite not being a formal part of the SQL standard, secondary indexes are one of its most powerful features. Secondary indexes are a level of indirection that @@ -175,12 +174,12 @@ allow quick lookups of a row using something other than the primary key. As an example, here is a secondary index on the "inventory" table, using only the "name" column: - CREATE INDEX name ON inventory (name); + CREATE INDEX name ON inventory (name); This secondary index allows fast lookups based on just the "name". We use the following key addressing scheme for this non-unique index: - /TableId/SecondaryIndexID/name/item_id/at_store -> + /TableId/SecondaryIndexID/name/item_id/at_store -> Notice that while the index is on "name", the key contains both "name" and the values for item_id and at_store. This is done to ensure that each row for a @@ -192,7 +191,7 @@ into a unique index. 
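// A toy illustration, in the same "/"-separated notation used by the text
// above (the real encoding is an order-preserving binary encoding, not
// strings), of the keys that one row of the inventory table maps to: the
// sentinel key plus one key per stored non-primary-key column. The column IDs
// for "name" and "stock" are made up.
package sketch

import "fmt"

func inventoryRowKeys(tableID, primaryIndexID, itemID, atStore int, name string, stock int) []string {
	prefix := fmt.Sprintf("/%d/%d/%d/%d", tableID, primaryIndexID, itemID, atStore)
	return []string{
		prefix + " -> (empty)",                      // sentinel key: the row exists
		fmt.Sprintf("%s/%d -> %q", prefix, 3, name), // hypothetical column ID 3 = "name"
		fmt.Sprintf("%s/%d -> %d", prefix, 4, stock), // hypothetical column ID 4 = "stock"
	}
}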
Let's suppose that we had instead defined the index as: - CREATE UNIQUE INDEX name ON inventory (name, item_id); + CREATE UNIQUE INDEX name ON inventory (name, item_id); Since this index is defined on creation as a unique index, we do not need to append the rest of the primary key columns to ensure uniqueness; instead, any @@ -202,12 +201,12 @@ creation itself will fail). However, we still need to be able to decode the full primary key by reading this index, as we will see later, in order to read any columns that are not in this index: - SELECT at_store FROM inventory WHERE name = "foo"; + SELECT at_store FROM inventory WHERE name = "foo"; The solution is to put any remaining primary key columns into the value. Thus, the key addressing for this unique index looks like this: - /TableID/SecondaryIndexID/name/item_id -> at_store + /TableID/SecondaryIndexID/name/item_id -> at_store The value for a unique index is composed of any primary key columns that are not already part of the index ("at_store" in this example). The goal of this key @@ -217,20 +216,20 @@ non-primary and non-index column requires two reads, first to decode the primary key, and then to read the full row for the primary key, which contains all the columns. For instance, to read the value of the "stock" column in this table: - SELECT stock FROM inventory WHERE name = "foo"; + SELECT stock FROM inventory WHERE name = "foo"; Looking this up by the index on "name" does not give us the value of the "stock" column. Instead, to process this query, Cockroach does two key-value reads, which are morally equivalent to the following two SQL queries: - SELECT (item_id, at_store) FROM inventory WHERE name = "foo"; + SELECT (item_id, at_store) FROM inventory WHERE name = "foo"; Then we use the values for the primary key that we received from the first query to perform the lookup: - SELECT stock FROM inventory WHERE item_id = "..." AND at_store = "..."; + SELECT stock FROM inventory WHERE item_id = "..." AND at_store = "..."; -Query Planning and Execution +# Query Planning and Execution SQL queries are executed by converting every SQL query into a set of transactional key-value operations. The Cockroach distributed transactional @@ -272,7 +271,7 @@ to resolve names within the query to actual objects within the system. Let's consider a query which looks up the stock of an item in the inventory table named "foo" with item_id X: - SELECT stock FROM inventory WHERE item_id = X AND name = 'test' + SELECT stock FROM inventory WHERE item_id = X AND name = 'test' The query planner first needs to resolve the "inventory" qualified name in the FROM clause to the appropriate TableDescriptor. It also needs to resolve the @@ -302,7 +301,7 @@ description is here: https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/, but back to the example above, the range information would determine that: - item_id >= 0 AND item_id <= 0 AND name >= 'test' and name <= 'test + item_id >= 0 AND item_id <= 0 AND name >= 'test' and name <= 'test Since there are two indexes on the "inventory" table, one index on "name" and another unique index on "item_id" and "name", the latter is selected as the @@ -311,11 +310,11 @@ candidate for performing a scan. 
To perform this scan, we need a start SecondaryIndexID of the chosen index, and the constraints on the range information above: - /inventory/SecondaryIndexID/item_id/name + /inventory/SecondaryIndexID/item_id/name The end key is: - /inventory/SecondaryIndexID/item_id/namf + /inventory/SecondaryIndexID/item_id/namf The "namf" suffix is not a typo: it is an abuse of notation to demonstrate how we calculate the end key: the end key is computed by incrementing the final byte @@ -323,11 +322,10 @@ of the start key such that "t" becomes "u". Our example scan will return two key-value pairs: - /system.descriptor/primary/0/test -> NULL - /system.descriptor/primary/0/test/id -> + /system.descriptor/primary/0/test -> NULL + /system.descriptor/primary/0/test/id -> The first key is the sentinel key, and the value from the second key returned by the scan is the result we need to return as the result of this SQL query. - */ package sql diff --git a/pkg/sql/drop_database.go b/pkg/sql/drop_database.go index 539fcceea8b3..49097835f92e 100644 --- a/pkg/sql/drop_database.go +++ b/pkg/sql/drop_database.go @@ -39,8 +39,9 @@ type dropDatabaseNode struct { // DropDatabase drops a database. // Privileges: DROP on database and DROP on all tables in the database. -// Notes: postgres allows only the database owner to DROP a database. -// mysql requires the DROP privileges on the database. +// +// Notes: postgres allows only the database owner to DROP a database. +// mysql requires the DROP privileges on the database. func (p *planner) DropDatabase(ctx context.Context, n *tree.DropDatabase) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/drop_index.go b/pkg/sql/drop_index.go index 4669d0f4568f..fc7a1e338441 100644 --- a/pkg/sql/drop_index.go +++ b/pkg/sql/drop_index.go @@ -39,8 +39,9 @@ type dropIndexNode struct { // DropIndex drops an index. // Privileges: CREATE on table. -// Notes: postgres allows only the index owner to DROP an index. -// mysql requires the INDEX privilege on the table. +// +// Notes: postgres allows only the index owner to DROP an index. +// mysql requires the INDEX privilege on the table. func (p *planner) DropIndex(ctx context.Context, n *tree.DropIndex) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/drop_table.go b/pkg/sql/drop_table.go index 84963e923f3f..8fc4ab83ad4e 100644 --- a/pkg/sql/drop_table.go +++ b/pkg/sql/drop_table.go @@ -47,8 +47,9 @@ type toDelete struct { // DropTable drops a table. // Privileges: DROP on table. -// Notes: postgres allows only the table owner to DROP a table. -// mysql requires the DROP privilege on the table. +// +// Notes: postgres allows only the table owner to DROP a table. +// mysql requires the DROP privilege on the table. func (p *planner) DropTable(ctx context.Context, n *tree.DropTable) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/drop_view.go b/pkg/sql/drop_view.go index 63bf0cddd4a5..221bed7b0c8b 100644 --- a/pkg/sql/drop_view.go +++ b/pkg/sql/drop_view.go @@ -34,8 +34,9 @@ type dropViewNode struct { // DropView drops a view. // Privileges: DROP on view. -// Notes: postgres allows only the view owner to DROP a view. -// mysql requires the DROP privilege on the view. +// +// Notes: postgres allows only the view owner to DROP a view. +// mysql requires the DROP privilege on the view. 
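// A minimal sketch of the "increment the final byte" trick described above for
// turning a start key into an exclusive end key ("name" -> "namf",
// "test" -> "tesu"). In CockroachDB this is roughly what roachpb.Key.PrefixEnd
// does; this toy version simply skips trailing 0xff bytes rather than handling
// that edge case fully.
package sketch

// prefixEnd([]byte("test")) returns []byte("tesu").
func prefixEnd(start []byte) []byte {
	end := append([]byte(nil), start...) // copy; leave the caller's key alone
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] != 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// Every byte was 0xff; a real implementation special-cases this.
	return end
}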
func (p *planner) DropView(ctx context.Context, n *tree.DropView) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/exec_factory_util.go b/pkg/sql/exec_factory_util.go index 6e77b0e91755..d9201c4b4db0 100644 --- a/pkg/sql/exec_factory_util.go +++ b/pkg/sql/exec_factory_util.go @@ -132,9 +132,9 @@ func tableToScanOrdinals( // getResultColumnsForSimpleProject populates result columns for a simple // projection. inputCols must be non-nil and contain the result columns before // the projection has been applied. It supports two configurations: -// 1. colNames and resultTypes are non-nil. resultTypes indicates the updated -// types (after the projection has been applied) -// 2. colNames is nil. +// 1. colNames and resultTypes are non-nil. resultTypes indicates the updated +// types (after the projection has been applied) +// 2. colNames is nil. func getResultColumnsForSimpleProject( cols []exec.NodeColumnOrdinal, colNames []string, diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 4fde889606e1..c758b846a1e1 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -2297,8 +2297,10 @@ func (st *SessionTracing) getSessionTrace() ([]traceRow, error) { // // Args: // kvTracingEnabled: If set, the traces will also include "KV trace" messages - -// verbose messages around the interaction of SQL with KV. Some of the messages -// are per-row. +// +// verbose messages around the interaction of SQL with KV. Some of the messages +// are per-row. +// // showResults: If set, result rows are reported in the trace. func (st *SessionTracing) StartTracing( recType tracingpb.RecordingType, kvTracingEnabled, showResults bool, @@ -2518,11 +2520,11 @@ type traceRow [traceNumCols]tree.Datum // A regular expression to split log messages. // It has three parts: -// - the (optional) code location, with at least one forward slash and a period -// in the file name: -// ((?:[^][ :]+/[^][ :]+\.[^][ :]+:[0-9]+)?) -// - the (optional) tag: ((?:\[(?:[^][]|\[[^]]*\])*\])?) -// - the message itself: the rest. +// - the (optional) code location, with at least one forward slash and a period +// in the file name: +// ((?:[^][ :]+/[^][ :]+\.[^][ :]+:[0-9]+)?) +// - the (optional) tag: ((?:\[(?:[^][]|\[[^]]*\])*\])?) +// - the message itself: the rest. var logMessageRE = regexp.MustCompile( `(?s:^((?:[^][ :]+/[^][ :]+\.[^][ :]+:[0-9]+)?) *((?:\[(?:[^][]|\[[^]]*\])*\])?) *(.*))`) diff --git a/pkg/sql/execinfra/processorsbase.go b/pkg/sql/execinfra/processorsbase.go index 8ab45ff28b35..1002121f96da 100644 --- a/pkg/sql/execinfra/processorsbase.go +++ b/pkg/sql/execinfra/processorsbase.go @@ -836,9 +836,11 @@ func ProcessorSpan(ctx context.Context, name string) (context.Context, *tracing. // // It is likely that this method is called from RowSource.Start implementation, // and the recommended layout is the following: -// ctx = pb.StartInternal(ctx, name) -// < inputs >.Start(ctx) // if there are any inputs-RowSources to pb -// < other initialization > +// +// ctx = pb.StartInternal(ctx, name) +// < inputs >.Start(ctx) // if there are any inputs-RowSources to pb +// < other initialization > +// // so that the caller doesn't mistakenly use old ctx object. 
func (pb *ProcessorBaseNoHelper) StartInternal(ctx context.Context, name string) context.Context { return pb.startImpl(ctx, true /* createSpan */, name) @@ -877,9 +879,9 @@ func (pb *ProcessorBaseNoHelper) startImpl( // Notably, it calls ConsumerClosed() on all the inputsToDrain and updates // pb.Ctx to the context passed into StartInternal() call. // -// if pb.InternalClose() { -// // Perform processor specific close work. -// } +// if pb.InternalClose() { +// // Perform processor specific close work. +// } func (pb *ProcessorBase) InternalClose() bool { return pb.InternalCloseEx(nil /* onClose */) } diff --git a/pkg/sql/execinfra/version.go b/pkg/sql/execinfra/version.go index e6d9b6e0fe46..35895e795a7f 100644 --- a/pkg/sql/execinfra/version.go +++ b/pkg/sql/execinfra/version.go @@ -24,18 +24,18 @@ import "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" // // This mechanism can be used to provide a "window" of compatibility when new // features are added. Example: -// - we start with Version=1; distsql servers with version 1 only accept -// requests with version 1. -// - a new distsql feature is added; Version is bumped to 2. The -// planner does not yet use this feature by default; it still issues -// requests with version 1. -// - MinAcceptedVersion is still 1, i.e. servers with version 2 -// accept both versions 1 and 2. -// - after an upgrade cycle, we can enable the feature in the planner, -// requiring version 2. -// - at some later point, we can choose to deprecate version 1 and have -// servers only accept versions >= 2 (by setting -// MinAcceptedVersion to 2). +// - we start with Version=1; distsql servers with version 1 only accept +// requests with version 1. +// - a new distsql feature is added; Version is bumped to 2. The +// planner does not yet use this feature by default; it still issues +// requests with version 1. +// - MinAcceptedVersion is still 1, i.e. servers with version 2 +// accept both versions 1 and 2. +// - after an upgrade cycle, we can enable the feature in the planner, +// requiring version 2. +// - at some later point, we can choose to deprecate version 1 and have +// servers only accept versions >= 2 (by setting +// MinAcceptedVersion to 2). // // Why does this all matter? Because of rolling upgrades, distsql servers across // nodes may not have an overlapping window of compatibility, so only a subset diff --git a/pkg/sql/execinfrapb/expr.go b/pkg/sql/execinfrapb/expr.go index 07fc95f43a0f..737c532cf469 100644 --- a/pkg/sql/execinfrapb/expr.go +++ b/pkg/sql/execinfrapb/expr.go @@ -221,10 +221,11 @@ func RunFilter(filter tree.TypedExpr, evalCtx *eval.Context) (bool, error) { // Eval - given a row - evaluates the wrapped expression and returns the // resulting datum. For example, given a row (1, 2, 3, 4, 5): -// '@2' would return '2' -// '@2 + @5' would return '7' -// '@1' would return '1' -// '@2 + 10' would return '12' +// +// '@2' would return '2' +// '@2 + @5' would return '7' +// '@1' would return '1' +// '@2 + 10' would return '12' func (eh *ExprHelper) Eval(row rowenc.EncDatumRow) (tree.Datum, error) { eh.Row = row diff --git a/pkg/sql/execstats/traceanalyzer.go b/pkg/sql/execstats/traceanalyzer.go index 726b223cf1c1..ca0253d2bf95 100644 --- a/pkg/sql/execstats/traceanalyzer.go +++ b/pkg/sql/execstats/traceanalyzer.go @@ -96,7 +96,8 @@ func NewFlowsMetadata(flows map[base.SQLInstanceID]*execinfrapb.FlowSpec) *Flows // NodeLevelStats returns all the flow level stats that correspond to the given // traces and flow metadata. 
// TODO(asubiotto): Flatten this struct, we're currently allocating a map per -// stat. +// +// stat. type NodeLevelStats struct { NetworkBytesSentGroupedByNode map[base.SQLInstanceID]int64 MaxMemoryUsageGroupedByNode map[base.SQLInstanceID]int64 diff --git a/pkg/sql/executor_statement_metrics.go b/pkg/sql/executor_statement_metrics.go index 96ada9e82458..5b0d6ad6a39d 100644 --- a/pkg/sql/executor_statement_metrics.go +++ b/pkg/sql/executor_statement_metrics.go @@ -106,11 +106,11 @@ func (GuardrailMetrics) MetricStruct() {} // recordStatementSummary gathers various details pertaining to the // last executed statement/query and performs the associated // accounting in the passed-in EngineMetrics. -// - distSQLUsed reports whether the query was distributed. -// - automaticRetryCount is the count of implicit txn retries -// so far. -// - result is the result set computed by the query/statement. -// - err is the error encountered, if any. +// - distSQLUsed reports whether the query was distributed. +// - automaticRetryCount is the count of implicit txn retries +// so far. +// - result is the result set computed by the query/statement. +// - err is the error encountered, if any. func (ex *connExecutor) recordStatementSummary( ctx context.Context, planner *planner, diff --git a/pkg/sql/explain_bundle.go b/pkg/sql/explain_bundle.go index b731a6617848..3b8ba0a37c09 100644 --- a/pkg/sql/explain_bundle.go +++ b/pkg/sql/explain_bundle.go @@ -491,7 +491,8 @@ func TestingOverrideExplainEnvVersion(ver string) func() { } // PrintVersion appends a row of the form: -// -- Version: CockroachDB CCL v20.1.0 ... +// +// -- Version: CockroachDB CCL v20.1.0 ... func (c *stmtEnvCollector) PrintVersion(w io.Writer) error { version, err := c.query("SELECT version()") if err != nil { diff --git a/pkg/sql/flowinfra/flow_scheduler.go b/pkg/sql/flowinfra/flow_scheduler.go index 57ec595c91bf..3e766be46856 100644 --- a/pkg/sql/flowinfra/flow_scheduler.go +++ b/pkg/sql/flowinfra/flow_scheduler.go @@ -156,7 +156,8 @@ var flowSchedulerQueueingEnabled = settings.RegisterBoolSetting( // canRunFlow returns whether the FlowScheduler can run the flow. If true is // returned, numRunning is also incremented. // TODO(radu): we will have more complex resource accounting (like memory). -// For now we just limit the number of concurrent flows. +// +// For now we just limit the number of concurrent flows. func (fs *FlowScheduler) canRunFlow() bool { // Optimistically increase numRunning to account for this new flow. newNumRunning := atomic.AddInt32(&fs.atomics.numRunning, 1) diff --git a/pkg/sql/flowinfra/stream_decoder.go b/pkg/sql/flowinfra/stream_decoder.go index 110b50d47740..cd2dedc7b5fb 100644 --- a/pkg/sql/flowinfra/stream_decoder.go +++ b/pkg/sql/flowinfra/stream_decoder.go @@ -24,22 +24,23 @@ import ( // records. // // Sample usage: -// sd := StreamDecoder{} -// var row sqlbase.EncDatumRow -// for each message in stream { -// err := sd.AddMessage(msg) -// if err != nil { ... } -// for { -// row, meta, err := sd.GetRow(row) -// if err != nil { ... } -// if row == nil && meta.Empty() { -// // No more rows in this message. -// break -// } -// // Use -// ... -// } -// } +// +// sd := StreamDecoder{} +// var row sqlbase.EncDatumRow +// for each message in stream { +// err := sd.AddMessage(msg) +// if err != nil { ... } +// for { +// row, meta, err := sd.GetRow(row) +// if err != nil { ... } +// if row == nil && meta.Empty() { +// // No more rows in this message. +// break +// } +// // Use +// ... 
+// } +// } // // AddMessage can be called multiple times before getting the rows, but this // will cause data to accumulate internally. diff --git a/pkg/sql/flowinfra/stream_encoder.go b/pkg/sql/flowinfra/stream_encoder.go index 8bf80856ae1c..6c160464a0a8 100644 --- a/pkg/sql/flowinfra/stream_encoder.go +++ b/pkg/sql/flowinfra/stream_encoder.go @@ -29,17 +29,18 @@ const PreferredEncoding = descpb.DatumEncoding_ASCENDING_KEY // StreamEncoder converts EncDatum rows into a sequence of ProducerMessage. // // Sample usage: -// se := StreamEncoder{} // -// for { -// for ... { -// err := se.AddRow(...) -// ... -// } -// msg := se.FormMessage(nil) -// // Send out message. -// ... -// } +// se := StreamEncoder{} +// +// for { +// for ... { +// err := se.AddRow(...) +// ... +// } +// msg := se.FormMessage(nil) +// // Send out message. +// ... +// } type StreamEncoder struct { types []*types.T // encodings is fully initialized when the first row is received. diff --git a/pkg/sql/gcjob/gc_job.go b/pkg/sql/gcjob/gc_job.go index 8681a19aad44..c02c6d9dcc32 100644 --- a/pkg/sql/gcjob/gc_job.go +++ b/pkg/sql/gcjob/gc_job.go @@ -41,7 +41,8 @@ var ( // SetSmallMaxGCIntervalForTest sets the MaxSQLGCInterval and then returns a closure // that resets it. // This is to be used in tests like: -// defer SetSmallMaxGCIntervalForTest() +// +// defer SetSmallMaxGCIntervalForTest() func SetSmallMaxGCIntervalForTest() func() { oldInterval := MaxSQLGCInterval MaxSQLGCInterval = 500 * time.Millisecond diff --git a/pkg/sql/grant_revoke.go b/pkg/sql/grant_revoke.go index e2510251c6ad..608f77dabe7c 100644 --- a/pkg/sql/grant_revoke.go +++ b/pkg/sql/grant_revoke.go @@ -40,8 +40,9 @@ import ( // TODO(marc): open questions: // - should we have root always allowed and not present in the permissions list? // Privileges: GRANT on database/table/view. -// Notes: postgres requires the object owner. -// mysql requires the "grant option" and the same privileges, and sometimes superuser. +// +// Notes: postgres requires the object owner. +// mysql requires the "grant option" and the same privileges, and sometimes superuser. func (p *planner) Grant(ctx context.Context, n *tree.Grant) (planNode, error) { grantOn, err := p.getGrantOnObject(ctx, n.Targets, sqltelemetry.IncIAMGrantPrivilegesCounter) if err != nil { @@ -97,8 +98,9 @@ func (p *planner) Grant(ctx context.Context, n *tree.Grant) (planNode, error) { // TODO(marc): open questions: // - should we have root always allowed and not present in the permissions list? // Privileges: GRANT on database/table/view. -// Notes: postgres requires the object owner. -// mysql requires the "grant option" and the same privileges, and sometimes superuser. +// +// Notes: postgres requires the object owner. +// mysql requires the "grant option" and the same privileges, and sometimes superuser. func (p *planner) Revoke(ctx context.Context, n *tree.Revoke) (planNode, error) { grantOn, err := p.getGrantOnObject(ctx, n.Targets, sqltelemetry.IncIAMRevokePrivilegesCounter) if err != nil { diff --git a/pkg/sql/idxrecommendations/idx_recommendations.go b/pkg/sql/idxrecommendations/idx_recommendations.go index ca86ab3c95c1..ddc0d8d0a3b8 100644 --- a/pkg/sql/idxrecommendations/idx_recommendations.go +++ b/pkg/sql/idxrecommendations/idx_recommendations.go @@ -37,11 +37,10 @@ type IdxRecommendations interface { // FormatIdxRecommendations formats a list of index recommendations. 
The output // is in the format: // -// { -// "replacement : CREATE UNIQUE INDEX ON t1 (i) STORING (k); DROP INDEX t1@existing_t1_i;", -// "creation : CREATE INDEX ON t2 (i) STORING (k);", -// } -// +// { +// "replacement : CREATE UNIQUE INDEX ON t1 (i) STORING (k); DROP INDEX t1@existing_t1_i;", +// "creation : CREATE INDEX ON t2 (i) STORING (k);", +// } func FormatIdxRecommendations(recs []indexrec.Rec) []string { if len(recs) == 0 { return nil diff --git a/pkg/sql/importer/exportcsv_test.go b/pkg/sql/importer/exportcsv_test.go index 9d167ac4eabe..a999876e269c 100644 --- a/pkg/sql/importer/exportcsv_test.go +++ b/pkg/sql/importer/exportcsv_test.go @@ -17,7 +17,6 @@ import ( gosql "database/sql" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -303,11 +302,11 @@ func TestExportUniqueness(t *testing.T) { const stmt = `EXPORT INTO CSV 'nodelocal://0/' WITH chunk_rows=$1 FROM SELECT * FROM foo` sqlDB.Exec(t, stmt, 2) - dir1, err := ioutil.ReadDir(dir) + dir1, err := os.ReadDir(dir) require.NoError(t, err) sqlDB.Exec(t, stmt, 2) - dir2, err := ioutil.ReadDir(dir) + dir2, err := os.ReadDir(dir) require.NoError(t, err) require.Equal(t, 2*len(dir1), len(dir2), "second export did not double the number of files") @@ -527,12 +526,12 @@ func TestExportTargetFileSizeSetting(t *testing.T) { sqlDB := sqlutils.MakeSQLRunner(conn) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://0/foo' WITH chunk_size='10KB' FROM select i, gen_random_uuid() from generate_series(1, 4000) as i;`) - files, err := ioutil.ReadDir(filepath.Join(dir, "foo")) + files, err := os.ReadDir(filepath.Join(dir, "foo")) require.NoError(t, err) require.Equal(t, 14, len(files)) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://0/foo-compressed' WITH chunk_size='10KB',compression='gzip' FROM select i, gen_random_uuid() from generate_series(1, 4000) as i;`) - zipFiles, err := ioutil.ReadDir(filepath.Join(dir, "foo-compressed")) + zipFiles, err := os.ReadDir(filepath.Join(dir, "foo-compressed")) require.NoError(t, err) require.GreaterOrEqual(t, len(zipFiles), 6) } diff --git a/pkg/sql/importer/exportparquet.go b/pkg/sql/importer/exportparquet.go index 6f91ff16f5f1..fdcb3ea86e71 100644 --- a/pkg/sql/importer/exportparquet.go +++ b/pkg/sql/importer/exportparquet.go @@ -659,9 +659,12 @@ func NewParquetColumn(typ *types.T, name string, nullable bool) (ParquetColumn, // newParquetSchema creates the schema for the parquet file, // see example schema: -// https://github.com/fraugster/parquet-go/issues/18#issuecomment-946013210 +// +// https://github.com/fraugster/parquet-go/issues/18#issuecomment-946013210 +// // see docs here: -// https://pkg.go.dev/github.com/fraugster/parquet-go/parquetschema#SchemaDefinition +// +// https://pkg.go.dev/github.com/fraugster/parquet-go/parquetschema#SchemaDefinition func newParquetSchema(parquetFields []ParquetColumn) *parquetschema.SchemaDefinition { schemaDefinition := new(parquetschema.SchemaDefinition) schemaDefinition.RootColumn = new(parquetschema.ColumnDefinition) diff --git a/pkg/sql/importer/read_import_avro.go b/pkg/sql/importer/read_import_avro.go index 086b01e88c70..848a82a0ba66 100644 --- a/pkg/sql/importer/read_import_avro.go +++ b/pkg/sql/importer/read_import_avro.go @@ -54,15 +54,16 @@ func nativeTimeToDatum(t time.Time, targetT *types.T) (tree.Datum, error) { // // While Avro's specification is fairly broad, and supports arbitrary complex // data types, this method concerns itself with -// - primitive avro types: null, boolean, int (32), long (64), float (32), double (64), -// bytes, 
string, and arrays of the above. -// - logical avro types (as defined by the go avro library): long.time-micros, int.time-millis, -// long.timestamp-micros,long.timestamp-millis, and int.date +// - primitive avro types: null, boolean, int (32), long (64), float (32), double (64), +// bytes, string, and arrays of the above. +// - logical avro types (as defined by the go avro library): long.time-micros, int.time-millis, +// long.timestamp-micros,long.timestamp-millis, and int.date // // An avro record is, essentially, a key->value mapping from field name to field value. // A field->value mapping may be represented directly (i.e. the // interface{} pass in will have corresponding go primitive type): -// user_id:123 -- that is the interface{} type will be int, and it's value is 123. +// +// user_id:123 -- that is the interface{} type will be int, and it's value is 123. // // Or, we could see field_name:null, if the field is nullable and is null. // diff --git a/pkg/sql/importer/read_import_base.go b/pkg/sql/importer/read_import_base.go index 1789bc399286..0dbc325dc9fb 100644 --- a/pkg/sql/importer/read_import_base.go +++ b/pkg/sql/importer/read_import_base.go @@ -17,7 +17,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math" "net/url" "strings" @@ -326,9 +325,9 @@ func decompressingReader( case roachpb.IOFileFormat_Gzip: return gzip.NewReader(in) case roachpb.IOFileFormat_Bzip: - return ioutil.NopCloser(bzip2.NewReader(in)), nil + return io.NopCloser(bzip2.NewReader(in)), nil default: - return ioutil.NopCloser(in), nil + return io.NopCloser(in), nil } } diff --git a/pkg/sql/instrumentation.go b/pkg/sql/instrumentation.go index 1f3035ec2155..787dc7859ce2 100644 --- a/pkg/sql/instrumentation.go +++ b/pkg/sql/instrumentation.go @@ -61,17 +61,16 @@ var collectTxnStatsSampleRate = settings.RegisterFloatSetting( // instrumentationHelper encapsulates the logic around extracting information // about the execution of a statement, like bundles and traces. Typical usage: // -// - SetOutputMode() can be used as necessary if we are running an EXPLAIN -// ANALYZE variant. +// - SetOutputMode() can be used as necessary if we are running an EXPLAIN +// ANALYZE variant. // -// - Setup() is called before query execution. +// - Setup() is called before query execution. // -// - SetDiscardRows(), ShouldDiscardRows(), ShouldSaveFlows(), -// ShouldBuildExplainPlan(), RecordExplainPlan(), RecordPlanInfo(), -// PlanForStats() can be called at any point during execution. -// -// - Finish() is called after query execution. +// - SetDiscardRows(), ShouldDiscardRows(), ShouldSaveFlows(), +// ShouldBuildExplainPlan(), RecordExplainPlan(), RecordPlanInfo(), +// PlanForStats() can be called at any point during execution. // +// - Finish() is called after query execution. type instrumentationHelper struct { outputMode outputMode // explainFlags is used when outputMode is explainAnalyzePlanOutput or diff --git a/pkg/sql/inverted/expression.go b/pkg/sql/inverted/expression.go index 095dec0e007c..6d4ab4ac3e5d 100644 --- a/pkg/sql/inverted/expression.go +++ b/pkg/sql/inverted/expression.go @@ -32,16 +32,16 @@ import ( // // It would be ideal if the inverted column only contained Datums, since we // could then work with a Datum here. However, JSON breaks that approach: -// - JSON inverted columns use a custom encoding that uses a special byte -// jsonInvertedIndex, followed by the bytes produced by the various -// implementations of the encodeInvertedIndexKey() method in the JSON -// interface. 
This could be worked around by using a JSON datum that -// represents a single path as the start key of the span, and representing -// [start, start] spans. We would special case the encoding logic to -// recognize that it is dealing with JSON (we have similar special path code -// for JSON elsewhere). But this is insufficient (next bullet). -// - Expressions like x ? 'b' don't have operands that are JSON, but can be -// represented using a span on the inverted column. +// - JSON inverted columns use a custom encoding that uses a special byte +// jsonInvertedIndex, followed by the bytes produced by the various +// implementations of the encodeInvertedIndexKey() method in the JSON +// interface. This could be worked around by using a JSON datum that +// represents a single path as the start key of the span, and representing +// [start, start] spans. We would special case the encoding logic to +// recognize that it is dealing with JSON (we have similar special path code +// for JSON elsewhere). But this is insufficient (next bullet). +// - Expressions like x ? 'b' don't have operands that are JSON, but can be +// represented using a span on the inverted column. // // So we make it the job of the caller of this library to encode the inverted // column. Note that the second bullet above has some similarities with the @@ -195,26 +195,29 @@ func (is Spans) End(i int) []byte { // to be evaluated on the inverted index. Any implementation can be used in the // builder functions And() and Or(), but in practice there are two useful // implementations provided here: -// - SpanExpression: this is the normal expression representing unions and -// intersections over spans of the inverted index. A SpanExpression is the -// root of an expression tree containing other SpanExpressions (there is one -// exception when a SpanExpression tree can contain non-SpanExpressions, -// discussed below for Joins). -// - NonInvertedColExpression: this is a marker expression representing the universal -// span, due to it being an expression on the non inverted column. This only appears in -// expression trees with a single node, since Anding with such an expression simply -// changes the tightness to false and Oring with this expression replaces the -// other expression with a NonInvertedColExpression. +// - SpanExpression: this is the normal expression representing unions and +// intersections over spans of the inverted index. A SpanExpression is the +// root of an expression tree containing other SpanExpressions (there is one +// exception when a SpanExpression tree can contain non-SpanExpressions, +// discussed below for Joins). +// - NonInvertedColExpression: this is a marker expression representing the universal +// span, due to it being an expression on the non inverted column. This only appears in +// expression trees with a single node, since Anding with such an expression simply +// changes the tightness to false and Oring with this expression replaces the +// other expression with a NonInvertedColExpression. // -// Optimizer cost estimation +// # Optimizer cost estimation // // There are two cases: -// - Single table expression: after generating the Expression, the -// optimizer will check that it is a *SpanExpression -- if not, it is a -// NonInvertedColExpression, which implies a full inverted index scan, and -// it is definitely not worth using the inverted index. 
There are two costs for -// using the inverted index: +// +// - Single table expression: after generating the Expression, the +// optimizer will check that it is a *SpanExpression -- if not, it is a +// NonInvertedColExpression, which implies a full inverted index scan, and +// it is definitely not worth using the inverted index. There are two costs for +// using the inverted index: +// // - The scan cost: this should be estimated by using SpanExpression.SpansToRead. +// // - The cardinality of the output set after evaluating the expression: this // requires a traversal of the expression to assign cardinality to the // spans in each FactoredUnionSpans (this could be done using a mean, @@ -227,55 +230,55 @@ func (is Spans) End(i int) []byte { // used to derive the expected cardinality of the union of the two sets // and the intersection of the two sets. // -// - Join expression: Assigning a cost is hard since there are two -// parameters, corresponding to the left and right columns. In some cases, -// like Geospatial, the expression that could be generated is a black-box to -// the optimizer since the quad-tree traversal is unknown until partial -// application (when one of the parameters is known). Minimally, we do need to -// know whether the user expression is going to cause a full inverted index -// scan due to parts of the expression referring to non-inverted columns. -// The optimizer will provide its own placeholder implementation of -// Expression into which it can embed whatever information it wants. -// Let's call this the UnknownExpression -- it will only exist at the -// leaves of the expression tree. It will use this UnknownExpression -// whenever there is an expression involving both the inverted columns. If -// the final expression is a NonInvertedColExpression, it is definitely not -// worth using the inverted index. If the final expression is an -// UnknownExpression (the tree must be a single node) or a *SpanExpression, -// the optimizer could either conjure up some magic cost number or try to -// compose one using costs assigned to each span (as described in the -// previous bullet) and to each leaf-level UnknownExpression. +// - Join expression: Assigning a cost is hard since there are two +// parameters, corresponding to the left and right columns. In some cases, +// like Geospatial, the expression that could be generated is a black-box to +// the optimizer since the quad-tree traversal is unknown until partial +// application (when one of the parameters is known). Minimally, we do need to +// know whether the user expression is going to cause a full inverted index +// scan due to parts of the expression referring to non-inverted columns. +// The optimizer will provide its own placeholder implementation of +// Expression into which it can embed whatever information it wants. +// Let's call this the UnknownExpression -- it will only exist at the +// leaves of the expression tree. It will use this UnknownExpression +// whenever there is an expression involving both the inverted columns. If +// the final expression is a NonInvertedColExpression, it is definitely not +// worth using the inverted index. If the final expression is an +// UnknownExpression (the tree must be a single node) or a *SpanExpression, +// the optimizer could either conjure up some magic cost number or try to +// compose one using costs assigned to each span (as described in the +// previous bullet) and to each leaf-level UnknownExpression. 
// -// Query evaluation +// # Query evaluation // // There are two cases: -// - Single table expression: The optimizer will convert the *SpanExpression -// into a form that is passed to the evaluation machinery, which can recreate -// the *SpanExpression and evaluate it. The optimizer will have constructed -// the spans for the evaluation using SpanExpression.SpansToRead, so the -// expression evaluating code does not need to concern itself with the spans -// to be read. -// e.g. the query was of the form ... WHERE x <@ '{"a":1, "b":2}'::json -// The optimizer constructs a *SpanExpression, and +// - Single table expression: The optimizer will convert the *SpanExpression +// into a form that is passed to the evaluation machinery, which can recreate +// the *SpanExpression and evaluate it. The optimizer will have constructed +// the spans for the evaluation using SpanExpression.SpansToRead, so the +// expression evaluating code does not need to concern itself with the spans +// to be read. +// e.g. the query was of the form ... WHERE x <@ '{"a":1, "b":2}'::json +// The optimizer constructs a *SpanExpression, and // - uses the serialization of the *SpanExpression as the spec for a processor // that will evaluate the expression. // - uses the SpanExpression.SpansToRead to specify the inverted index // spans that must be read and fed to the processor. -// - Join expression: The optimizer had an expression tree with the root as -// a *SpanExpression or an UnknownExpression. Therefore it knows that after -// partial application the expression will be a *SpanExpression. It passes the -// inverted expression with two unknowns, as a string, to the join execution -// machinery. The optimizer provides a way to do partial application for each -// input row, and returns a *SpanExpression, which is evaluated on the -// inverted index. -// e.g. the join query was of the form -// ... ON t1.x <@ t2.y OR (t1.x @> t2.y AND t2.y @> '{"a":1, "b":2}'::json) -// and the optimizer decides to use the inverted index on t2.y. The optimizer -// passes an expression string with two unknowns in the InvertedJoinerSpec, -// where @1 represents t1.x and @2 represents t2.y. For each input row of -// t1 the inverted join processor asks the optimizer to apply the value of @1 -// and return a *SpanExpression, which the join processor will evaluate on -// the inverted index. +// - Join expression: The optimizer had an expression tree with the root as +// a *SpanExpression or an UnknownExpression. Therefore it knows that after +// partial application the expression will be a *SpanExpression. It passes the +// inverted expression with two unknowns, as a string, to the join execution +// machinery. The optimizer provides a way to do partial application for each +// input row, and returns a *SpanExpression, which is evaluated on the +// inverted index. +// e.g. the join query was of the form +// ... ON t1.x <@ t2.y OR (t1.x @> t2.y AND t2.y @> '{"a":1, "b":2}'::json) +// and the optimizer decides to use the inverted index on t2.y. The optimizer +// passes an expression string with two unknowns in the InvertedJoinerSpec, +// where @1 represents t1.x and @2 represents t2.y. For each input row of +// t1 the inverted join processor asks the optimizer to apply the value of @1 +// and return a *SpanExpression, which the join processor will evaluate on +// the inverted index. 
type Expression interface { // IsTight returns whether the inverted expression is tight, i.e., will the // original expression not need to be reevaluated on each row output by the diff --git a/pkg/sql/opt/bench/bench_test.go b/pkg/sql/opt/bench/bench_test.go index 767c59ae2a5a..94920d3ee757 100644 --- a/pkg/sql/opt/bench/bench_test.go +++ b/pkg/sql/opt/bench/bench_test.go @@ -55,10 +55,10 @@ import ( // normalization rules turned on) and the resulting memo is saved and reused. On // each execution, placeholders are assigned before exploration. Only these // phases are valid with the prepare protocol: -// - AssignPlaceholdersNoNorm -// - AssignPlaceholdersNorm -// - Explore -// - ExecBuild +// - AssignPlaceholdersNoNorm +// - AssignPlaceholdersNorm +// - Explore +// - ExecBuild type Phase int const ( @@ -863,12 +863,11 @@ func queriesToTest(b *testing.B) []benchQuery { // // For example, a 5-chain looks like: // -// SELECT * FROM a, b, c, d, e -// WHERE a.x = b.y -// AND b.x = c.y -// AND c.x = d.y -// AND d.x = e.y -// +// SELECT * FROM a, b, c, d, e +// WHERE a.x = b.y +// AND b.x = c.y +// AND c.x = d.y +// AND d.x = e.y func BenchmarkChain(b *testing.B) { for i := 1; i < 20; i++ { q := makeChain(i) diff --git a/pkg/sql/opt/cat/catalog.go b/pkg/sql/opt/cat/catalog.go index 268563716a2a..7c3719ed05d1 100644 --- a/pkg/sql/opt/cat/catalog.go +++ b/pkg/sql/opt/cat/catalog.go @@ -26,9 +26,9 @@ import ( // StableID permanently and uniquely identifies a catalog object (table, view, // index, column, etc.) within its scope: // -// data source StableID: unique within database -// index StableID: unique within table -// column StableID: unique within table +// data source StableID: unique within database +// index StableID: unique within table +// column StableID: unique within table // // If a new catalog object is created, it will always be assigned a new StableID // that has never, and will never, be reused by a different object in the same diff --git a/pkg/sql/opt/cat/column.go b/pkg/sql/opt/cat/column.go index a5c7cae94f5a..1d81c58a69c3 100644 --- a/pkg/sql/opt/cat/column.go +++ b/pkg/sql/opt/cat/column.go @@ -45,7 +45,8 @@ type Column struct { // Ordinal returns the position of the column in its table. The following always // holds: -// tab.Column(i).Ordinal() == i +// +// tab.Column(i).Ordinal() == i func (c *Column) Ordinal() int { return c.ordinal } diff --git a/pkg/sql/opt/cat/table.go b/pkg/sql/opt/cat/table.go index 5b8841c9d642..0b427b76c7b8 100644 --- a/pkg/sql/opt/cat/table.go +++ b/pkg/sql/opt/cat/table.go @@ -29,7 +29,7 @@ import ( // deletes are allowed. Finally, in the delete-only state, only deletes are // allowed. Further details about "online schema change" can be found in: // -// docs/RFCS/20151014_online_schema_change.md +// docs/RFCS/20151014_online_schema_change.md // // Calling code must take care to use the right collection of columns or // indexes. Usually this should be the public collections, since most usages are @@ -179,8 +179,7 @@ type Table interface { // on the content of each row in a table. For example, this check constraint // ensures that only values greater than zero can be inserted into the table: // -// CREATE TABLE a (a INT CHECK (a > 0)) -// +// CREATE TABLE a (a INT CHECK (a > 0)) type CheckConstraint struct { Constraint string Validated bool @@ -253,7 +252,9 @@ type HistogramBucket struct { // ForeignKeyConstraint represents a foreign key constraint. A foreign key // constraint has an origin (or referencing) side and a referenced side. 
For // example: -// ALTER TABLE o ADD CONSTRAINT fk FOREIGN KEY (a,b) REFERENCES r(a,b) +// +// ALTER TABLE o ADD CONSTRAINT fk FOREIGN KEY (a,b) REFERENCES r(a,b) +// // Here o is the origin table, r is the referenced table, and we have two pairs // of columns: (o.a,r.a) and (o.b,r.b). type ForeignKeyConstraint interface { @@ -301,7 +302,9 @@ type ForeignKeyConstraint interface { // UniqueConstraint represents a uniqueness constraint. UniqueConstraints may // or may not be enforced with a unique index. For example, the following // statement creates a unique constraint on column a without a unique index: -// ALTER TABLE t ADD CONSTRAINT u UNIQUE WITHOUT INDEX (a); +// +// ALTER TABLE t ADD CONSTRAINT u UNIQUE WITHOUT INDEX (a); +// // In order to enforce this uniqueness constraint, the optimizer must add // a uniqueness check as a postquery to any query that inserts into or updates // column a. diff --git a/pkg/sql/opt/cat/zone.go b/pkg/sql/opt/cat/zone.go index d5205e53b363..b53407562cb4 100644 --- a/pkg/sql/opt/cat/zone.go +++ b/pkg/sql/opt/cat/zone.go @@ -109,9 +109,8 @@ type ReplicaConstraints interface { // A prohibited constraint's key/value pair must *not* match any of the tiers of // a node's locality for the range to locate there. For example: // -// +region=east Range can only be placed on nodes in region=east locality. -// -region=west Range cannot be placed on nodes in region=west locality. -// +// +region=east Range can only be placed on nodes in region=east locality. +// -region=west Range cannot be placed on nodes in region=west locality. type Constraint interface { // IsRequired is true if this is a required constraint, or false if this is // a prohibited constraint (signified by initial + or - character). diff --git a/pkg/sql/opt/colset.go b/pkg/sql/opt/colset.go index 0be4a075074c..f6b6cc107b0b 100644 --- a/pkg/sql/opt/colset.go +++ b/pkg/sql/opt/colset.go @@ -29,6 +29,7 @@ type ColSet struct { const offset = 1 // setVal returns the value to store in the internal set for the given ColumnID. +// //gcassert:inline func setVal(col ColumnID) int { return int(col - offset) @@ -36,6 +37,7 @@ func setVal(col ColumnID) int { // retVal returns the ColumnID to return for the given value in the internal // set. +// //gcassert:inline func retVal(i int) ColumnID { return ColumnID(i + offset) @@ -147,14 +149,16 @@ func (s ColSet) ToList() ColList { // relations with a defined column mapping). // // For example, suppose we have a UNION with the following column mapping: -// Left: 1, 2, 3 -// Right: 4, 5, 6 -// Out: 7, 8, 9 +// +// Left: 1, 2, 3 +// Right: 4, 5, 6 +// Out: 7, 8, 9 // // Here are some possible calls to TranslateColSet and their results: -// TranslateColSet(ColSet{1, 2}, Left, Right) -> ColSet{4, 5} -// TranslateColSet(ColSet{5, 6}, Right, Out) -> ColSet{8, 9} -// TranslateColSet(ColSet{9}, Out, Right) -> ColSet{6} +// +// TranslateColSet(ColSet{1, 2}, Left, Right) -> ColSet{4, 5} +// TranslateColSet(ColSet{5, 6}, Right, Out) -> ColSet{8, 9} +// TranslateColSet(ColSet{9}, Out, Right) -> ColSet{6} // // Any columns in the input set that do not appear in the from list are ignored. // @@ -162,7 +166,7 @@ func (s ColSet) ToList() ColList { // possible for the input and output sets to have different cardinality. // Consider the following case: // -// SELECT x, x, y FROM xyz UNION SELECT a, b, c FROM abc +// SELECT x, x, y FROM xyz UNION SELECT a, b, c FROM abc // // TranslateColSet(ColSet{x, y}, {x, x, y}, {a, b, c}) returns ColSet{a, b, c}. 
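As a reading aid for the TranslateColSet examples above, here is a small self-contained sketch of the same column mapping. colSet and translateColSet are simplified stand-ins (a map instead of the real bitmap-based opt.ColSet), reproducing the documented behavior rather than the actual implementation.

package main

import "fmt"

// colSet is a simplified stand-in for opt.ColSet (the real type is a bitmap).
type colSet map[int]bool

func makeColSet(cols ...int) colSet {
	s := colSet{}
	for _, c := range cols {
		s[c] = true
	}
	return s
}

// translateColSet mirrors the documented behavior: for every position i where
// from[i] is in the input set, add to[i] to the output. Input columns missing
// from the `from` list are ignored, and a column that appears several times in
// `from` fans out to several output columns, which is why the cardinality of
// the result can differ from the input.
func translateColSet(in colSet, from, to []int) colSet {
	out := colSet{}
	for i, c := range from {
		if in[c] {
			out[to[i]] = true
		}
	}
	return out
}

func main() {
	left, right, outCols := []int{1, 2, 3}, []int{4, 5, 6}, []int{7, 8, 9}

	fmt.Println(translateColSet(makeColSet(1, 2), left, right))    // {4, 5}
	fmt.Println(translateColSet(makeColSet(5, 6), right, outCols)) // {8, 9}
	fmt.Println(translateColSet(makeColSet(9), outCols, right))    // {6}

	// SELECT x, x, y FROM xyz UNION SELECT a, b, c FROM abc:
	// translating {x, y} through ({x, x, y} -> {a, b, c}) yields {a, b, c}.
	fmt.Println(translateColSet(makeColSet(1, 2), []int{1, 1, 2}, []int{10, 11, 12}))
}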
// diff --git a/pkg/sql/opt/constraint/constraint.go b/pkg/sql/opt/constraint/constraint.go index 9440018ad444..fa2e9be2a095 100644 --- a/pkg/sql/opt/constraint/constraint.go +++ b/pkg/sql/opt/constraint/constraint.go @@ -34,9 +34,9 @@ import ( // Set struct comment for more details. // // A few examples: -// - a constraint on @1 > 1: a single span /@1: (/1 - ] -// - a constraint on @1 = 1 AND @2 >= 1: a single span /@1/@2: [/1/1 - /1] -// - a constraint on @1 < 5 OR @1 > 10: multiple spans /@1: [ - /5) (10 - ] +// - a constraint on @1 > 1: a single span /@1: (/1 - ] +// - a constraint on @1 = 1 AND @2 >= 1: a single span /@1/@2: [/1/1 - /1] +// - a constraint on @1 < 5 OR @1 > 10: multiple spans /@1: [ - /5) (10 - ] type Constraint struct { Columns Columns @@ -340,9 +340,10 @@ func (c *Constraint) findIntersectingSpan(keyCtx *KeyContext, sp *Span) (_ *Span // Combine refines the receiver constraint using constraints on a suffix of the // same list of columns. For example: -// c: /a/b: [/1 - /2] [/4 - /4] -// other: /b: [/5 - /5] -// result: /a/b: [/1/5 - /2/5] [/4/5 - /4/5] +// +// c: /a/b: [/1 - /2] [/4 - /4] +// other: /b: [/5 - /5] +// result: /a/b: [/1/5 - /2/5] [/4/5 - /4/5] func (c *Constraint) Combine(evalCtx *eval.Context, other *Constraint) { if !other.Columns.IsStrictSuffixOf(&c.Columns) { // Note: we don't want to let the c and other pointers escape by passing @@ -472,7 +473,9 @@ func (c *Constraint) Combine(evalCtx *eval.Context, other *Constraint) { } // ConsolidateSpans merges spans that have consecutive boundaries. For example: -// [/1 - /2] [/3 - /4] becomes [/1 - /4]. +// +// [/1 - /2] [/3 - /4] becomes [/1 - /4]. +// // An optional PrefixSorter parameter describes the localities of partitions in // the index for which the Constraint is being built. Spans belonging to 100% // local partitions will not be consolidated with spans that overlap any remote @@ -545,11 +548,12 @@ func (c *Constraint) ConsolidateSpans(evalCtx *eval.Context, ps partition.Prefix // ExactPrefix returns the length of the longest column prefix which are // constrained to a single value. For example: -// /a/b/c: [/1/2/3 - /1/2/3] -> ExactPrefix = 3 -// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/2/8] -> ExactPrefix = 2 -// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/3/8] -> ExactPrefix = 1 -// /a/b/c: [/1/2/3 - /1/2/3] [/1/3/3 - /1/3/3] -> ExactPrefix = 1 -// /a/b/c: [/1/2/3 - /1/2/3] [/3 - /4] -> ExactPrefix = 0 +// +// /a/b/c: [/1/2/3 - /1/2/3] -> ExactPrefix = 3 +// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/2/8] -> ExactPrefix = 2 +// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/3/8] -> ExactPrefix = 1 +// /a/b/c: [/1/2/3 - /1/2/3] [/1/3/3 - /1/3/3] -> ExactPrefix = 1 +// /a/b/c: [/1/2/3 - /1/2/3] [/3 - /4] -> ExactPrefix = 0 func (c *Constraint) ExactPrefix(evalCtx *eval.Context) int { if c.IsContradiction() { return 0 @@ -578,7 +582,9 @@ func (c *Constraint) ExactPrefix(evalCtx *eval.Context) int { // ConstrainedColumns returns the number of columns which are constrained by // the Constraint. For example: -// /a/b/c: [/1/1 - /1] [/3 - /3] +// +// /a/b/c: [/1/1 - /1] [/3 - /3] +// // has 2 constrained columns. This may be less than the total number of columns // in the constraint, especially if it represents an index constraint. func (c *Constraint) ConstrainedColumns(evalCtx *eval.Context) int { @@ -600,13 +606,16 @@ func (c *Constraint) ConstrainedColumns(evalCtx *eval.Context) int { // Prefix returns the length of the longest prefix of columns for which all the // spans have the same start and end values. 
For example: -// /a/b/c: [/1/1/1 - /1/1/2] [/3/3/3 - /3/3/4] +// +// /a/b/c: [/1/1/1 - /1/1/2] [/3/3/3 - /3/3/4] +// // has prefix 2. // // Note that Prefix returns a value that is greater than or equal to the value // returned by ExactPrefix. For example: -// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/3/8] -> ExactPrefix = 1, Prefix = 1 -// /a/b/c: [/1/2/3 - /1/2/3] [/1/3/3 - /1/3/3] -> ExactPrefix = 1, Prefix = 3 +// +// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/3/8] -> ExactPrefix = 1, Prefix = 1 +// /a/b/c: [/1/2/3 - /1/2/3] [/1/3/3 - /1/3/3] -> ExactPrefix = 1, Prefix = 3 func (c *Constraint) Prefix(evalCtx *eval.Context) int { if c.IsContradiction() { return 0 @@ -632,10 +641,13 @@ func (c *Constraint) Prefix(evalCtx *eval.Context) int { // constant by the constraint. // // For example, in this constraint, columns a and c are constant: -// /a/b/c: [/1/1/1 - /1/1/1] [/1/4/1 - /1/4/1] +// +// /a/b/c: [/1/1/1 - /1/1/1] [/1/4/1 - /1/4/1] // // However, none of the columns in this constraint are constant: -// /a/b: [/1/1 - /2/1] [/3/1 - /3/1] +// +// /a/b: [/1/1 - /2/1] [/3/1 - /3/1] +// // Even though column b might appear to be constant, the first span allows // column b to take on any value. For example, a=1 and b=100 is contained in // the first span. @@ -737,6 +749,7 @@ func (c *Constraint) ExtractNotNullCols(evalCtx *eval.Context) opt.ColSet { // specify any nulls. // 2. All spans cover all the columns of the index and have equal start and // end keys up to but not necessarily including the last column. +// // TODO(asubiotto): The only reason to extract this is that both the heuristic // planner and optimizer need this logic, due to the heuristic planner planning // mutations. Once the optimizer plans mutations, this method can go away. diff --git a/pkg/sql/opt/constraint/constraint_set.go b/pkg/sql/opt/constraint/constraint_set.go index 1d45f78756ef..e81d87ba5439 100644 --- a/pkg/sql/opt/constraint/constraint_set.go +++ b/pkg/sql/opt/constraint/constraint_set.go @@ -41,21 +41,21 @@ var Contradiction = &Set{contradiction: true} // expression tree each time. // // A few examples: -// - @1 >= 10 -// /@1: [/10 - ] // -// - @1 > 10 AND @2 = 5 -// /@1: [/11 - ] -// /@2: [/5 - /5] +// - @1 >= 10 +// /@1: [/10 - ] // -// - (@1 = 10 AND @2 > 5) OR (@1 = 20 AND @2 > 0) -// /@1: [/10 - /10] [/20 - /20] -// /@2: [/1 - ] +// - @1 > 10 AND @2 = 5 +// /@1: [/11 - ] +// /@2: [/5 - /5] // -// - @1 > 10.5 AND @2 != 'foo' -// /@1: (10.5 - ] -// /@2: [ - 'foo') ('foo' - ] +// - (@1 = 10 AND @2 > 5) OR (@1 = 20 AND @2 > 0) +// /@1: [/10 - /10] [/20 - /20] +// /@2: [/1 - ] // +// - @1 > 10.5 AND @2 != 'foo' +// /@1: (10.5 - ] +// /@2: [ - 'foo') ('foo' - ] type Set struct { // firstConstraint holds the first constraint in the set and otherConstraints // hold any constraints beyond the first. These are separated in order to @@ -185,7 +185,9 @@ func (s *Set) Intersect(evalCtx *eval.Context, other *Set) *Set { // may not be "tight", meaning that the new constraint set might allow // additional combinations of values that neither of the input sets allowed. For // example: -// (x > 1 AND y > 10) OR (x < 5 AND y < 50) +// +// (x > 1 AND y > 10) OR (x < 5 AND y < 50) +// // the union is unconstrained (and thus allows combinations like x,y = 10,0). // // Union returns the merged set. 
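The point that Union is not necessarily tight may be easier to see with numbers. The sketch below is a deliberately simplified model, a single numeric interval per column rather than the real per-column Constraints with span lists, and only illustrates why the (x > 1 AND y > 10) OR (x < 5 AND y < 50) example collapses to an unconstrained set.

package main

import (
	"fmt"
	"math"
)

// interval is a toy single-column constraint: a numeric range, with +/-Inf
// standing in for an unbounded side.
type interval struct{ lo, hi float64 }

// union keeps only what a per-column constraint set can keep: the smallest
// single range containing both inputs.
func union(a, b interval) interval {
	return interval{math.Min(a.lo, b.lo), math.Max(a.hi, b.hi)}
}

func main() {
	inf := math.Inf(1)

	// Disjunct 1: x > 1 AND y > 10.
	x1, y1 := interval{1, inf}, interval{10, inf}
	// Disjunct 2: x < 5 AND y < 50.
	x2, y2 := interval{math.Inf(-1), 5}, interval{math.Inf(-1), 50}

	// Column-by-column union, which is all Union can produce:
	fmt.Println("x:", union(x1, x2)) // unbounded on both sides
	fmt.Println("y:", union(y1, y2)) // unbounded on both sides

	// x=10, y=0 satisfies neither disjunct, yet it is allowed by the unioned
	// per-column set: the result is not tight.
}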
diff --git a/pkg/sql/opt/constraint/key.go b/pkg/sql/opt/constraint/key.go index 12b25287ca02..2aa86bb01ce7 100644 --- a/pkg/sql/opt/constraint/key.go +++ b/pkg/sql/opt/constraint/key.go @@ -93,8 +93,9 @@ func (k Key) Value(nth int) tree.Datum { // extensions specify whether each key is conceptually suffixed with negative // or positive infinity for purposes of comparison. For example, if k is /1/2, // then: -// k (kExt = ExtendLow) : /1/2/Low -// k (kExt = ExtendHigh): /1/2/High +// +// k (kExt = ExtendLow) : /1/2/Low +// k (kExt = ExtendHigh): /1/2/High // // These extensions have no effect if one key is not a prefix of the other, // since the comparison would already have concluded in previous values. But @@ -102,10 +103,11 @@ func (k Key) Value(nth int) tree.Datum { // extension determines which key is greater. This enables correct comparison // of start and end keys in spans which may be either inclusive or exclusive. // Here is the mapping: -// [/1/2 - ...] (inclusive start key): ExtendLow : /1/2/Low -// (/1/2 - ...] (exclusive start key): ExtendHigh: /1/2/High -// [... - /1/2] (inclusive end key) : ExtendHigh: /1/2/High -// [... - /1/2) (exclusive end key) : ExtendLow : /1/2/Low +// +// [/1/2 - ...] (inclusive start key): ExtendLow : /1/2/Low +// (/1/2 - ...] (exclusive start key): ExtendHigh: /1/2/High +// [... - /1/2] (inclusive end key) : ExtendHigh: /1/2/High +// [... - /1/2) (exclusive end key) : ExtendLow : /1/2/Low func (k Key) Compare(keyCtx *KeyContext, l Key, kext, lext KeyExtension) int { klen := k.Length() llen := l.Length() @@ -162,7 +164,8 @@ func (k Key) Concat(l Key) Key { // CutFront returns the key with the first numCols values removed. // Example: -// [/1/2 - /1/3].CutFront(1) = [/2 - /3] +// +// [/1/2 - /1/3].CutFront(1) = [/2 - /3] func (k Key) CutFront(numCols int) Key { if numCols == 0 { return k @@ -178,7 +181,8 @@ func (k Key) CutFront(numCols int) Key { // CutBack returns the key with the last numCols values removed. // Example: -// '/1/2'.CutBack(1) = '/1' +// +// '/1/2'.CutBack(1) = '/1' func (k Key) CutBack(numCols int) Key { if numCols == 0 { return k @@ -193,10 +197,11 @@ func (k Key) CutBack(numCols int) Key { } // IsNextKey returns true if: -// - k and other have the same length; -// - on all but the last column, k and other have the same values; -// - on the last column, k has the datum that follows other's datum (for -// types that support it). +// - k and other have the same length; +// - on all but the last column, k and other have the same values; +// - on the last column, k has the datum that follows other's datum (for +// types that support it). +// // For example: /2.IsNextKey(/1) is true. func (k Key) IsNextKey(keyCtx *KeyContext, other Key) bool { n := k.Length() @@ -216,16 +221,19 @@ func (k Key) IsNextKey(keyCtx *KeyContext, other Key) bool { // Next returns the next key; this only works for discrete types like integers. // It is guaranteed that there are no possible keys in the span -// ( key, Next(keu) ). +// +// ( key, Next(keu) ). // // Examples: -// Next(/1/2) = /1/3 -// Next(/1/false) = /1/true -// Next(/1/true) returns !ok -// Next(/'foo') = /'foo\x00' +// +// Next(/1/2) = /1/3 +// Next(/1/false) = /1/true +// Next(/1/true) returns !ok +// Next(/'foo') = /'foo\x00' // // If a column is descending, the values on that column go backwards: -// Next(/2) = /1 +// +// Next(/2) = /1 // // The key cannot be empty. 
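As a rough illustration of the Next examples above, including the descending-column case, here is a toy sketch restricted to integer-only keys; nextIntKey is an invented name, and it skips everything the real Key.Next handles beyond plain integers (booleans, strings, key context, and the "no next value" cases).

package main

import "fmt"

// nextIntKey is a toy version of "next key" for keys made only of integers: it
// advances the last value in that column's direction.
func nextIntKey(key []int, descending []bool) []int {
	next := append([]int(nil), key...)
	last := len(next) - 1
	if descending[last] {
		next[last]-- // descending columns run backwards
	} else {
		next[last]++
	}
	return next
}

func main() {
	fmt.Println(nextIntKey([]int{1, 2}, []bool{false, false})) // [1 3]
	fmt.Println(nextIntKey([]int{2}, []bool{true}))            // [1]
}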
func (k Key) Next(keyCtx *KeyContext) (_ Key, ok bool) { @@ -252,13 +260,15 @@ func (k Key) Next(keyCtx *KeyContext) (_ Key, ok bool) { // Prev returns the next key; this only works for discrete types like integers. // // Examples: -// Prev(/1/2) = /1/1 -// Prev(/1/true) = /1/false -// Prev(/1/false) returns !ok. -// Prev(/'foo') returns !ok. +// +// Prev(/1/2) = /1/1 +// Prev(/1/true) = /1/false +// Prev(/1/false) returns !ok. +// Prev(/'foo') returns !ok. // // If a column is descending, the values on that column go backwards: -// Prev(/1) = /2 +// +// Prev(/1) = /2 // // If this is the minimum possible key, returns EmptyKey. func (k Key) Prev(keyCtx *KeyContext) (_ Key, ok bool) { @@ -278,10 +288,11 @@ func (k Key) Prev(keyCtx *KeyContext) (_ Key, ok bool) { } // String formats a key like this: -// EmptyKey : empty string -// Key with 1 value : /2 -// Key with 2 values: /5/1 -// Key with 3 values: /3/6/4 +// +// EmptyKey : empty string +// Key with 1 value : /2 +// Key with 2 values: /5/1 +// Key with 3 values: /3/6/4 func (k Key) String() string { var buf bytes.Buffer for i := 0; i < k.Length(); i++ { diff --git a/pkg/sql/opt/constraint/span.go b/pkg/sql/opt/constraint/span.go index de5232a922b5..d9ddafffebb2 100644 --- a/pkg/sql/opt/constraint/span.go +++ b/pkg/sql/opt/constraint/span.go @@ -21,8 +21,9 @@ import ( // SpanBoundary specifies whether a span endpoint is inclusive or exclusive of // its start or end key. An inclusive boundary is represented as '[' and an // exclusive boundary is represented as ')'. Examples: -// [/0 - /1] (inclusive, inclusive) -// [/1 - /10) (inclusive, exclusive) +// +// [/0 - /1] (inclusive, inclusive) +// [/1 - /10) (inclusive, exclusive) type SpanBoundary bool const ( @@ -39,10 +40,11 @@ const ( // range can be inclusive or exclusive. Each key value within the range is // an N-tuple of datum values, one for each constrained column. Here are some // examples: -// @1 < 100 : [ - /100) -// @1 >= 100 : [/100 - ] -// @1 >= 1 AND @1 <= 10 : [/1 - /10] -// (@1 = 100 AND @2 > 10) OR (@1 > 100 AND @1 <= 101): (/100/10 - /101] +// +// @1 < 100 : [ - /100) +// @1 >= 100 : [/100 - ] +// @1 >= 1 AND @1 <= 10 : [/1 - /10] +// (@1 = 100 AND @2 > 10) OR (@1 > 100 AND @1 <= 101): (/100/10 - /101] type Span struct { // Start is the starting boundary for the span. start Key @@ -156,23 +158,24 @@ func (sp *Span) Init(start Key, startBoundary SpanBoundary, end Key, endBoundary // boundary, and an exclusive end boundary is less than an inclusive end // boundary. 
Here are examples of how various spans are ordered, with // equivalent extended keys shown as well (see Key.Compare comment): -// [ - /2 ) = /Low - /2/Low -// [ - /2/1) = /Low - /2/1/Low -// [ - /2/1] = /Low - /2/1/High -// [ - /2 ] = /Low - /2/High -// [ - ] = /Low - /High -// [/1 - /2/1) = /1/Low - /2/1/Low -// [/1 - /2/1] = /1/Low - /2/1/High -// [/1 - ] = /1/Low - /High -// [/1/1 - /2 ) = /1/1/Low - /2/Low -// [/1/1 - /2 ] = /1/1/Low - /2/High -// [/1/1 - ] = /1/1/Low - /High -// (/1/1 - /2 ) = /1/1/High - /2/Low -// (/1/1 - /2 ] = /1/1/High - /2/High -// (/1/1 - ] = /1/1/High - /High -// (/1 - /2/1) = /1/High - /2/1/Low -// (/1 - /2/1] = /1/High - /2/1/High -// (/1 - ] = /1/High - /High +// +// [ - /2 ) = /Low - /2/Low +// [ - /2/1) = /Low - /2/1/Low +// [ - /2/1] = /Low - /2/1/High +// [ - /2 ] = /Low - /2/High +// [ - ] = /Low - /High +// [/1 - /2/1) = /1/Low - /2/1/Low +// [/1 - /2/1] = /1/Low - /2/1/High +// [/1 - ] = /1/Low - /High +// [/1/1 - /2 ) = /1/1/Low - /2/Low +// [/1/1 - /2 ] = /1/1/Low - /2/High +// [/1/1 - ] = /1/1/Low - /High +// (/1/1 - /2 ) = /1/1/High - /2/Low +// (/1/1 - /2 ] = /1/1/High - /2/High +// (/1/1 - ] = /1/1/High - /High +// (/1 - /2/1) = /1/High - /2/1/Low +// (/1 - /2/1] = /1/High - /2/1/High +// (/1 - ] = /1/High - /High func (sp *Span) Compare(keyCtx *KeyContext, other *Span) int { // Span with lowest start boundary is less than the other. if cmp := sp.CompareStarts(keyCtx, other); cmp != 0 { @@ -258,7 +261,8 @@ func (sp *Span) TryIntersectWith(keyCtx *KeyContext, other *Span) bool { // spans cannot be expressed as a single span, then TryUnionWith will not // update the span and TryUnionWith returns false. This could occur if the // spans are disjoint, for example: -// [/1 - /5] UNION [/10 - /15] +// +// [/1 - /5] UNION [/10 - /15] // // Otherwise, this span is updated to the merged span range and TryUnionWith // returns true. If the resulting span does not constrain the range [ - ], then @@ -306,12 +310,12 @@ func (sp *Span) TryUnionWith(keyCtx *KeyContext, other *Span) bool { // with more constraints on columns that follow. // // Examples: -// - for an integer column (/1 - /5) => [/2 - /4]. -// - for a descending integer column (/5 - /1) => (/4 - /2). -// - for a string column, we don't have Prev so -// (/foo - /qux) => [/foo\x00 - /qux). -// - for a decimal column, we don't have either Next or Prev so we can't -// change anything. +// - for an integer column (/1 - /5) => [/2 - /4]. +// - for a descending integer column (/5 - /1) => (/4 - /2). +// - for a string column, we don't have Prev so +// (/foo - /qux) => [/foo\x00 - /qux). +// - for a decimal column, we don't have either Next or Prev so we can't +// change anything. func (sp *Span) PreferInclusive(keyCtx *KeyContext) { if sp.startBoundary == ExcludeBoundary { if key, ok := sp.start.Next(keyCtx); ok { @@ -336,24 +340,23 @@ func (sp *Span) CutFront(numCols int) { // KeyCount returns the number of distinct keys between specified-length // prefixes of the start and end keys. Returns zero and false if the operation // is not possible. Requirements: -// 1. The given prefix length must be at least 1. -// 2. The boundaries must be inclusive. -// 3. The start and end keys must have at least prefixLength values. -// 4. The start and end keys be equal up to index [prefixLength-2]. -// 5. The datums at index [prefixLength-1] must be of the same type and: -// a. countable, or -// b. have the same value (in which case the distinct count is 1). +// 1. The given prefix length must be at least 1. +// 2. 
The boundaries must be inclusive. +// 3. The start and end keys must have at least prefixLength values. +// 4. The start and end keys be equal up to index [prefixLength-2]. +// 5. The datums at index [prefixLength-1] must be of the same type and: +// a. countable, or +// b. have the same value (in which case the distinct count is 1). // // Example: // -// [/'ASIA'/1/'postfix' - /'ASIA'/2].KeyCount(keyCtx, length=2) => 2, true +// [/'ASIA'/1/'postfix' - /'ASIA'/2].KeyCount(keyCtx, length=2) => 2, true // // Note that any extra key values beyond the given length are simply ignored. // Therefore, the above example will produce equivalent results if postfixes are // removed: // -// ['ASIA'/1 - /'ASIA'/2].KeyCount(keyCtx, length=2) => 2, true -// +// ['ASIA'/1 - /'ASIA'/2].KeyCount(keyCtx, length=2) => 2, true func (sp *Span) KeyCount(keyCtx *KeyContext, prefixLength int) (int64, bool) { if prefixLength < 1 { // The length must be at least one because distinct count is undefined for @@ -411,15 +414,14 @@ func (sp *Span) KeyCount(keyCtx *KeyContext, prefixLength int) (int64, bool) { // // Example: // -// [/'ASIA'/1/'post' - /'ASIA'/3/'fix'].Split(keyCtx, length=2, limit=10) -// => -// ( -// [/'ASIA'/1/'post' - /'ASIA'/1], -// [/'ASIA'/2 - /'ASIA'/2], -// [/'ASIA'/3 - /'ASIA'/3/'fix'], -// ), -// true -// +// [/'ASIA'/1/'post' - /'ASIA'/3/'fix'].Split(keyCtx, length=2, limit=10) +// => +// ( +// [/'ASIA'/1/'post' - /'ASIA'/1], +// [/'ASIA'/2 - /'ASIA'/2], +// [/'ASIA'/3 - /'ASIA'/3/'fix'], +// ), +// true func (sp *Span) Split(keyCtx *KeyContext, prefixLength int) (spans *Spans, ok bool) { keyCount, ok := sp.KeyCount(keyCtx, prefixLength) if !ok { @@ -481,11 +483,12 @@ func (sp *Span) endExt() KeyExtension { // String formats a Span. Inclusivity/exclusivity is shown using // brackets/parens. Some examples: -// [1 - 2] -// (1/1 - 2) -// [ - 5/6) -// [1 - ] -// [ - ] +// +// [1 - 2] +// (1/1 - 2) +// [ - 5/6) +// [1 - ] +// [ - ] func (sp Span) String() string { var buf bytes.Buffer if sp.startBoundary == IncludeBoundary { diff --git a/pkg/sql/opt/constraint/testutils.go b/pkg/sql/opt/constraint/testutils.go index 733c838e5def..156914287b3b 100644 --- a/pkg/sql/opt/constraint/testutils.go +++ b/pkg/sql/opt/constraint/testutils.go @@ -23,7 +23,8 @@ import ( ) // ParseConstraint parses a constraint in the format of Constraint.String, e.g: -// "/1/2/3: [/1 - /2]". +// +// "/1/2/3: [/1 - /2]". func ParseConstraint(evalCtx *eval.Context, str string) Constraint { s := strings.SplitN(str, ": ", 2) if len(s) != 2 { @@ -40,7 +41,8 @@ func ParseConstraint(evalCtx *eval.Context, str string) Constraint { } // parseSpans parses a list of spans with integer values like: -// "[/1 - /2] [/5 - /6]". +// +// "[/1 - /2] [/5 - /6]". 
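For readers unfamiliar with the span string format used by these test helpers, here is a toy parser for the documented form. The names span, spanRE, and parseSpanStrings are illustrative only; the sketch assumes key values contain no '-', ']' or ')', and the real helpers additionally convert the key values into datums.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// span mirrors the printed form used in these strings: start and end keys plus
// whether each boundary is inclusive ('[' / ']') or exclusive ('(' / ')').
type span struct {
	start, end         string
	startIncl, endIncl bool
}

var spanRE = regexp.MustCompile(`([\[(])([^-\])]*) - ([^\])]*)([\])])`)

// parseSpanStrings parses strings like "[/1 - /2] [/5 - /6]" into spans.
func parseSpanStrings(s string) []span {
	var out []span
	for _, m := range spanRE.FindAllStringSubmatch(s, -1) {
		out = append(out, span{
			start:     strings.TrimSpace(m[2]),
			end:       strings.TrimSpace(m[3]),
			startIncl: m[1] == "[",
			endIncl:   m[4] == "]",
		})
	}
	return out
}

func main() {
	fmt.Printf("%+v\n", parseSpanStrings("[/1 - /2] [/5 - /6]"))
	fmt.Printf("%+v\n", parseSpanStrings("(/100/10 - /101]"))
}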
func parseSpans(evalCtx *eval.Context, str string) Spans { if str == "" || str == "contradiction" { return Spans{} diff --git a/pkg/sql/opt/cycle/detector.go b/pkg/sql/opt/cycle/detector.go index dd2d5687c34a..2a1e73c7ab21 100644 --- a/pkg/sql/opt/cycle/detector.go +++ b/pkg/sql/opt/cycle/detector.go @@ -15,13 +15,12 @@ package cycle // // Example usage: // -// var d Detector -// d.AddEdge(Vertex(0), Vertex(1)) -// d.AddEdge(Vertex(1), Vertex(2)) -// d.AddEdge(Vertex(2), Vertex(0)) -// d.FindCycleStartingAtVertex(Vertex(0)) -// => [0, 1, 2, 0], true -// +// var d Detector +// d.AddEdge(Vertex(0), Vertex(1)) +// d.AddEdge(Vertex(1), Vertex(2)) +// d.AddEdge(Vertex(2), Vertex(0)) +// d.FindCycleStartingAtVertex(Vertex(0)) +// => [0, 1, 2, 0], true type Detector struct { // edges are the directed edges in the graph. edges map[Vertex][]Vertex diff --git a/pkg/sql/opt/doc.go b/pkg/sql/opt/doc.go index 770a8cf6d9c4..bdb69b8f7712 100644 --- a/pkg/sql/opt/doc.go +++ b/pkg/sql/opt/doc.go @@ -17,7 +17,7 @@ equivalent query plans with vastly different execution times. The Cockroach optimizer is cost-based, meaning that it enumerates some or all of these alternate plans and chooses the one with the lowest estimated cost. -Overview +# Overview SQL query planning is often described in terms of 8 modules: @@ -43,52 +43,52 @@ representing the forest of trees generated during Search. Stats, Properties, Cost Model and Transformations are modules that power the Prep, Rewrite and Search phases. - SQL query text - | - +-----v-----+ - parse SQL text according to grammar - | Parse | - report syntax errors - +-----+-----+ - | - (ast) - | - +-----v-----+ - fold constants, check types, resolve - | Analyze | names - +-----+-----+ - annotate tree with semantic info - | - report semantic errors - (ast+) - +-------+ | - | Stats +----->-----v-----+ - normalize tree with cost-agnostic - +-------+ | Prep | transforms (placeholders present) - +-->-----+-----+ - compute initial properties - | | - retrieve and attach stats - | (expr) - done once per PREPARE - | | - +------------+ | +-----v-----+ - capture placeholder values / timestamps - | Transforms |--+--> Rewrite | - normalize tree with cost-agnostic - +------------+ | +-----+-----+ transforms (placeholders not present) - | | - done once per EXECUTE - | (expr) - | | - +-->-----v-----+ - generate equivalent expression trees - +------------+ | Search | - find lowest cost physical plan - | Cost Model +----->-----+-----+ - includes DistSQL physical planning - +------------+ | - (physical plan) - | - +-----v-----+ - | Execution | - +-----------+ + SQL query text + | + +-----v-----+ - parse SQL text according to grammar + | Parse | - report syntax errors + +-----+-----+ + | + (ast) + | + +-----v-----+ - fold constants, check types, resolve + | Analyze | names + +-----+-----+ - annotate tree with semantic info + | - report semantic errors + (ast+) + +-------+ | + | Stats +----->-----v-----+ - normalize tree with cost-agnostic + +-------+ | Prep | transforms (placeholders present) + +-->-----+-----+ - compute initial properties + | | - retrieve and attach stats + | (expr) - done once per PREPARE + | | + +------------+ | +-----v-----+ - capture placeholder values / timestamps + | Transforms |--+--> Rewrite | - normalize tree with cost-agnostic + +------------+ | +-----+-----+ transforms (placeholders not present) + | | - done once per EXECUTE + | (expr) + | | + +-->-----v-----+ - generate equivalent expression trees + +------------+ | Search | - find lowest cost physical plan + | 
Cost Model +----->-----+-----+ - includes DistSQL physical planning + +------------+ | + (physical plan) + | + +-----v-----+ + | Execution | + +-----------+ The opt-related packages implement portions of these modules, while other parts are implemented elsewhere. For example, other sql packages are used to perform name resolution and type checking which are part of the Analyze phase. -Parse +# Parse The parse phase is not discussed in this document. It transforms the SQL query text into an abstract syntax tree (AST). -Analyze +# Analyze The analyze phase ensures that the AST obeys all SQL semantic rules, and annotates the AST with information that will be used by later phases. In @@ -116,7 +116,7 @@ legal static types. For example, the CONCAT function only accepts zero or more arguments that are statically typed as strings. Violation of the typing rules produces a semantic error. -Properties +# Properties Properties are meta-information that are computed (and sometimes stored) for each node in an expression. Properties power transformations and optimization. @@ -145,7 +145,7 @@ forms a key (unique values) in the results. A derived property is one derived by the optimizer for an expression based on the properties of the child expressions. For example: - SELECT k+1 FROM kv + SELECT k+1 FROM kv Once the ordering of "k" is known from kv's descriptor, the same ordering property can be derived for k+1. During optimization, for each expression with @@ -156,7 +156,7 @@ that provides the required property. For example, an ORDER BY clause creates a required ordering property that can cause the optimizer to add a Sort operator as an enforcer of that property. -Stats +# Stats Table statistics power both the cost model and the search of alternate query plans. A simple example of where statistics guide the search of alternate query @@ -190,7 +190,7 @@ system may bound the inaccuracy by recomputing the stats based on how fast a table is being modified. Or the system may notice when stat estimations are inaccurate during query execution. -Cost Model +# Cost Model The cost model takes an expression as input and computes an estimated "cost" to execute that expression. The unit of "cost" can be arbitrary, though it is @@ -225,7 +225,7 @@ Search finds the lowest cost plan using dynamic programming. That imposes a restriction on the cost model: it must exhibit optimal substructure. An optimal solution can be constructed from optimal solutions of its sub-problems. -Memo +# Memo Memo is a data structure for efficiently storing a forest of query plans. Conceptually, the memo is composed of a numbered set of equivalency classes @@ -298,7 +298,7 @@ Notice that there are now two expressions in memo group 6. The coster (see Cost Model section for more details) will estimate the execution cost of each expression, and the optimizer will select the lowest cost alternative. -Transforms +# Transforms Transforms convert an input expression tree into zero or more logically equivalent trees. Transforms consist of two parts: a "match pattern" and a @@ -377,13 +377,13 @@ tool called Optgen, short for "optimizer generator". Optgen is a domain- specific language (DSL) that provides an intuitive syntax for defining transform rules. 
Here is an example: - [NormalizeEq] - (Eq - $left:^(Variable) - $right:(Variable) - ) - => - (Eq $right $left) + [NormalizeEq] + (Eq + $left:^(Variable) + $right:(Variable) + ) + => + (Eq $right $left) The expression above the arrow is the match pattern and the expression below the arrow is the replace pattern. This example rule will match Eq expressions @@ -392,7 +392,7 @@ which is a Variable operator. The replace pattern will trigger a replacement that reverses the two inputs. In addition, custom match and replace functions can be defined in order to run arbitrary Go code. -Prep +# Prep Prep (short for "prepare") is the first phase of query optimization, in which the annotated AST is transformed into a single normalized "expression tree". @@ -445,7 +445,7 @@ columns of each (sub-)expression, equivalent columns, not-null columns and functional dependencies. These properties are computed bottom-up as part of constructing the expression tree. -Rewrite +# Rewrite Rewrite is the second phase of query optimization. Placeholder values are available starting at this phase, so new normalization rules will typically @@ -473,7 +473,7 @@ join that requires its inner child to return its rows in a specific order. The same group can be (and sometimes is) optimized multiple times, but with different required properties each time. -Search +# Search Search is the final phase of optimization. Search begins with a single normalized tree that was created by the earlier phases. For each group, the diff --git a/pkg/sql/opt/exec/execbuilder/cascades.go b/pkg/sql/opt/exec/execbuilder/cascades.go index 20b0b12be63a..59f537954b9b 100644 --- a/pkg/sql/opt/exec/execbuilder/cascades.go +++ b/pkg/sql/opt/exec/execbuilder/cascades.go @@ -30,28 +30,28 @@ import ( // We walk through a simple example of a cascade to illustrate the flow around // executing cascades: // -// CREATE TABLE parent (p INT PRIMARY KEY); -// CREATE TABLE child ( -// c INT PRIMARY KEY, -// p INT NOT NULL REFERENCES parent(p) ON DELETE CASCADE -// ); +// CREATE TABLE parent (p INT PRIMARY KEY); +// CREATE TABLE child ( +// c INT PRIMARY KEY, +// p INT NOT NULL REFERENCES parent(p) ON DELETE CASCADE +// ); // -// DELETE FROM parent WHERE p > 1; +// DELETE FROM parent WHERE p > 1; // // The optimizer expression for this query is: // -// delete parent -// ├── columns: -// ├── fetch columns: p:2 -// ├── input binding: &1 -// ├── cascades -// │ └── fk_p_ref_parent -// └── select -// ├── columns: p:2!null -// ├── scan parent -// │ └── columns: p:2!null -// └── filters -// └── p:2 > 1 +// delete parent +// ├── columns: +// ├── fetch columns: p:2 +// ├── input binding: &1 +// ├── cascades +// │ └── fk_p_ref_parent +// └── select +// ├── columns: p:2!null +// ├── scan parent +// │ └── columns: p:2!null +// └── filters +// └── p:2 > 1 // // Note that at this time, the cascading query in the child table was not built. // The expression above does contain a reference to a memo.CascadeBuilder which @@ -77,28 +77,28 @@ import ( // 2. We invoke the memo.CascadeBuilder to optbuild the cascading query. 
At this // point, the new memo will contain the following expression: // -// delete child -// ├── columns: -// ├── fetch columns: c:4 child.p:5 -// └── semi-join (hash) -// ├── columns: c:4!null child.p:5!null -// ├── scan child -// │ └── columns: c:4!null child.p:5!null -// ├── with-scan &1 -// │ ├── columns: p:6!null -// │ └── mapping: -// │ └── parent.p:1 => p:6 -// └── filters -// └── child.p:5 = p:6 +// delete child +// ├── columns: +// ├── fetch columns: c:4 child.p:5 +// └── semi-join (hash) +// ├── columns: c:4!null child.p:5!null +// ├── scan child +// │ └── columns: c:4!null child.p:5!null +// ├── with-scan &1 +// │ ├── columns: p:6!null +// │ └── mapping: +// │ └── parent.p:1 => p:6 +// └── filters +// └── child.p:5 = p:6 // -// Notes: +// Notes: // - normally, a WithScan can only refer to an ancestor mutation or With -// operator. In this case we are creating a reference "out of the void". -// This works just fine; we can consider adding a special dummy root -// operator but so far it hasn't been necessary; +// operator. In this case we are creating a reference "out of the void". +// This works just fine; we can consider adding a special dummy root +// operator but so far it hasn't been necessary; // - the binding &1 column ID has changed: it used to be 2, it is now 1. -// This is because we are starting with a fresh memo. We need to take into -// account this remapping when referring to the foreign key columns. +// This is because we are starting with a fresh memo. We need to take into +// account this remapping when referring to the foreign key columns. // // 3. We optimize the newly built expression. // @@ -108,7 +108,6 @@ import ( // After PlanFn is called, the resulting plan is executed. Note that this plan // could itself have more exec.Cascades; these are queued and handled in the // same way. -// type cascadeBuilder struct { b *Builder mutationBuffer exec.Node diff --git a/pkg/sql/opt/exec/execbuilder/mutation.go b/pkg/sql/opt/exec/execbuilder/mutation.go index 2d67c79438a1..22bf209fdcfc 100644 --- a/pkg/sql/opt/exec/execbuilder/mutation.go +++ b/pkg/sql/opt/exec/execbuilder/mutation.go @@ -890,11 +890,11 @@ func (b *Builder) buildFKCascades(withID opt.WithID, cascades memo.FKCascades) e // Mutations can commit the transaction as part of the same KV request, // potentially taking advantage of the 1PC optimization. This is not ok to do in // general; a sufficient set of conditions is: -// 1. There is a single mutation in the query. -// 2. The mutation is the root operator, or it is directly under a Project -// with no side-effecting expressions. An example of why we can't allow -// side-effecting expressions: if the projection encounters a -// division-by-zero error, the mutation shouldn't have been committed. +// 1. There is a single mutation in the query. +// 2. The mutation is the root operator, or it is directly under a Project +// with no side-effecting expressions. An example of why we can't allow +// side-effecting expressions: if the projection encounters a +// division-by-zero error, the mutation shouldn't have been committed. // // An extra condition relates to how the FK checks are run. If they run before // the mutation (via the insert fast path), auto commit is possible. 
If they run @@ -977,9 +977,9 @@ func (b *Builder) shouldApplyImplicitLockingToMutationInput(mutExpr memo.RelExpr // existing rows) then this method determines whether the builder should perform // the following transformation: // -// UPDATE t = SELECT FROM t + INSERT INTO t -// => -// UPDATE t = SELECT FROM t FOR UPDATE + INSERT INTO t +// UPDATE t = SELECT FROM t + INSERT INTO t +// => +// UPDATE t = SELECT FROM t FOR UPDATE + INSERT INTO t // // The transformation is conditional on the UPDATE expression tree matching a // pattern. Specifically, the FOR UPDATE locking mode is only used during the diff --git a/pkg/sql/opt/exec/execbuilder/testdata/hash_sharded_index b/pkg/sql/opt/exec/execbuilder/testdata/hash_sharded_index index 50a8b1a71054..f4437e37543a 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/hash_sharded_index +++ b/pkg/sql/opt/exec/execbuilder/testdata/hash_sharded_index @@ -532,7 +532,7 @@ vectorized: true columns: (i2, i4, i8, f4, f8, s, c, b, dc, ival, oid, tstz, ts, da, inet, vb) estimated row count: 1 (missing stats) table: sharded_primary_with_many_column_types@sharded_primary_with_many_column_types_pkey - spans: /5/1/1/1/1/1/"1"/"1"/"1"/1/00:00:01/1/1970-01-01T00:00:01Z/1970-01-01T00:00:01Z/1/"\x00 \u007f\x00\x00\x01"/B1/0 + spans: /5/1/1/1/1/1/"1"/"1"/"1"/1/00:00:01/1/1970-01-01T00:00:01Z/1970-01-01T00:00:01Z/1/"\x00 \x7f\x00\x00\x01"/B1/0 # Test to make sure constraint on shard column value is added correctly when # creating a table with output show create table. diff --git a/pkg/sql/opt/exec/explain/plan_gist_factory.go b/pkg/sql/opt/exec/explain/plan_gist_factory.go index 54f33b4df66a..59df67ee0d30 100644 --- a/pkg/sql/opt/exec/explain/plan_gist_factory.go +++ b/pkg/sql/opt/exec/explain/plan_gist_factory.go @@ -54,7 +54,7 @@ func init() { // efficient encoding version should be incremented. // // Version history: -// 1. Initial version. +// 1. Initial version. var gistVersion = 1 // PlanGist is a compact representation of a logical plan meant to be used as diff --git a/pkg/sql/opt/idxconstraint/index_constraints.go b/pkg/sql/opt/idxconstraint/index_constraints.go index d4451c4b455a..d677563480a6 100644 --- a/pkg/sql/opt/idxconstraint/index_constraints.go +++ b/pkg/sql/opt/idxconstraint/index_constraints.go @@ -526,7 +526,9 @@ func (c *indexConstraintCtx) makeSpansForTupleInequality( // makeSpansForTupleIn creates spans for index columns starting at // from a tuple IN tuple expression, for example: -// (a, b, c) IN ((1, 2, 3), (4, 5, 6)) +// +// (a, b, c) IN ((1, 2, 3), (4, 5, 6)) +// // Assumes that both sides are tuples. // The return value indicates if the spans are exactly equivalent // to the expression (and not weaker). @@ -871,32 +873,34 @@ func (c *indexConstraintCtx) binaryMergeSpansForOr( // getMaxSimplifyPrefix finds the longest prefix (maxSimplifyPrefix) such that // every span has the same first maxSimplifyPrefix values for the start and end // key. For example, for: -// [/1/2/3 - /1/2/4] -// [/2/3/4 - /2/3/4] +// +// [/1/2/3 - /1/2/4] +// [/2/3/4 - /2/3/4] +// // the longest prefix is 2. // // This prefix is significant for filter simplification: we can only // drop an expression based on its spans if the offset is at most // maxSimplifyPrefix. Examples: // -// Filter: @1 = 1 AND @2 >= 5 -// Spans: [/1/5 - /1] -// Remaining filter: -// Here maxSimplifyPrefix is 1; we can drop @2 >= 5 from the filter. +// Filter: @1 = 1 AND @2 >= 5 +// Spans: [/1/5 - /1] +// Remaining filter: +// Here maxSimplifyPrefix is 1; we can drop @2 >= 5 from the filter. 
// -// Filter: @1 >= 1 AND @1 <= 3 AND @2 >= 5 -// Spans: [/1/5 - /3] -// Remaining filter: @2 >= 5 -// Here maxSimplifyPrefix is 0; we cannot drop @2 >= 5. Because the span -// contains more than one value for the first column, there are areas where -// the condition needs to be checked, e.g for /2/0 to /2/4. +// Filter: @1 >= 1 AND @1 <= 3 AND @2 >= 5 +// Spans: [/1/5 - /3] +// Remaining filter: @2 >= 5 +// Here maxSimplifyPrefix is 0; we cannot drop @2 >= 5. Because the span +// contains more than one value for the first column, there are areas where +// the condition needs to be checked, e.g for /2/0 to /2/4. // -// Filter: (@1, @2) IN ((1, 1), (2, 2)) AND @3 >= 3 AND @4 = 4 -// Spans: [/1/1/3/4 - /1/1] -// [/2/2/3/4 - /2/2] -// Remaining filter: @4 = 4 -// Here maxSimplifyPrefix is 2; we can drop the IN and @3 >= 3 but we can't -// drop @4 = 4. +// Filter: (@1, @2) IN ((1, 1), (2, 2)) AND @3 >= 3 AND @4 = 4 +// Spans: [/1/1/3/4 - /1/1] +// [/2/2/3/4 - /2/2] +// Remaining filter: @4 = 4 +// Here maxSimplifyPrefix is 2; we can drop the IN and @3 >= 3 but we can't +// drop @4 = 4. func (c *indexConstraintCtx) getMaxSimplifyPrefix(idxConstraint *constraint.Constraint) int { maxOffset := len(c.columns) - 1 for i := 0; i < idxConstraint.Spans.Count(); i++ { @@ -931,28 +935,28 @@ func (c *indexConstraintCtx) getMaxSimplifyPrefix(idxConstraint *constraint.Cons // The following conditions are (together) sufficient for a sub-expression to be // true: // -// - the spans generated for this sub-expression are equivalent to the -// expression; we call such spans "tight". For example the condition -// `@1 >= 1` results in span `[/1 - ]` which is tight: inside this span, the -// condition is always true. On the other hand, if we have an index on -// @1,@2,@3 and condition `(@1, @3) >= (1, 3)`, the generated span is -// `[/1 - ]` which is not tight: we still need to verify the condition on @3 -// inside this span. +// - the spans generated for this sub-expression are equivalent to the +// expression; we call such spans "tight". For example the condition +// `@1 >= 1` results in span `[/1 - ]` which is tight: inside this span, the +// condition is always true. On the other hand, if we have an index on +// @1,@2,@3 and condition `(@1, @3) >= (1, 3)`, the generated span is +// `[/1 - ]` which is not tight: we still need to verify the condition on @3 +// inside this span. // -// - the spans for the entire filter are completely contained in the (tight) -// spans for this sub-expression. In this case, there can be no rows that are -// inside the filter span but outside the expression span. +// - the spans for the entire filter are completely contained in the (tight) +// spans for this sub-expression. In this case, there can be no rows that are +// inside the filter span but outside the expression span. // -// For example: `@1 = 1 AND @2 = 2` with span `[/1/2 - /1/2]`. When looking -// at sub-expression `@1 = 1` and its span `[/1 - /1]`, we see that it -// contains the filter span `[/1/2 - /1/2]` and thus the condition is always -// true inside `[/1/2 - /1/2`]. For `@2 = 2` we have the span `[/2 - /2]` -// but this span refers to the second index column (so it's actually -// equivalent to a collection of spans `[/?/2 - /?/2]`); the only way we can -// compare it against the filter span is if the latter restricts the previous -// column to a single value (which it does in this case; this is determined -// by getMaxSimplifyPrefix). 
So `[/1/2 - /1/2]` is contained in the -// expression span and we can simplify `@2 = 2` to `true`. +// For example: `@1 = 1 AND @2 = 2` with span `[/1/2 - /1/2]`. When looking +// at sub-expression `@1 = 1` and its span `[/1 - /1]`, we see that it +// contains the filter span `[/1/2 - /1/2]` and thus the condition is always +// true inside `[/1/2 - /1/2`]. For `@2 = 2` we have the span `[/2 - /2]` +// but this span refers to the second index column (so it's actually +// equivalent to a collection of spans `[/?/2 - /?/2]`); the only way we can +// compare it against the filter span is if the latter restricts the previous +// column to a single value (which it does in this case; this is determined +// by getMaxSimplifyPrefix). So `[/1/2 - /1/2]` is contained in the +// expression span and we can simplify `@2 = 2` to `true`. // // nestedUnderOrExpr indicates if we are processing a ScalarExpr which is the child // of an OrExpr. @@ -1017,13 +1021,14 @@ func (c *indexConstraintCtx) simplifyFilter( // expression. // // Sample usage: -// var ic Instance -// if err := ic.Init(...); err != nil { -// .. -// } -// spans, ok := ic.Spans() -// remFilterGroup := ic.RemainingFilters() -// remFilter := o.Optimize(remFilterGroup, &opt.PhysicalProps{}) +// +// var ic Instance +// if err := ic.Init(...); err != nil { +// .. +// } +// spans, ok := ic.Spans() +// remFilterGroup := ic.RemainingFilters() +// remFilter := o.Optimize(remFilterGroup, &opt.PhysicalProps{}) type Instance struct { indexConstraintCtx @@ -1187,10 +1192,9 @@ func (c *indexConstraintCtx) init( // isIndexColumn returns true if e is an expression that corresponds to index // column . The expression can be either -// - a variable on the index column, or -// - an expression that matches the computed column expression (if the index -// column is computed). -// +// - a variable on the index column, or +// - an expression that matches the computed column expression (if the index +// column is computed). func (c *indexConstraintCtx) isIndexColumn(e opt.Expr, offset int) bool { if v, ok := e.(*memo.VariableExpr); ok && v.Col == c.columns[offset].ID() { return true diff --git a/pkg/sql/opt/idxconstraint/index_constraints_test.go b/pkg/sql/opt/idxconstraint/index_constraints_test.go index 4859faeeffb9..203369b1e44d 100644 --- a/pkg/sql/opt/idxconstraint/index_constraints_test.go +++ b/pkg/sql/opt/idxconstraint/index_constraints_test.go @@ -38,25 +38,24 @@ import ( // // - index-constraints [arg | arg=val | arg=(val1,val2, ...)]... // -// Takes a scalar expression, builds a memo for it, and computes index -// constraints. Arguments: +// Takes a scalar expression, builds a memo for it, and computes index +// constraints. Arguments: // -// - vars=( [not null], ...) +// - vars=( [not null], ...) // -// Information about the columns. +// Information about the columns. // -// - index=( [ascending|asc|descending|desc], ...) +// - index=( [ascending|asc|descending|desc], ...) // -// Information for the index (used by index-constraints). +// Information for the index (used by index-constraints). // -// - nonormalize +// - nonormalize // -// Disable the optimizer normalization rules. +// Disable the optimizer normalization rules. // -// - semtree-normalize -// -// Run TypedExpr normalization before building the memo. +// - semtree-normalize // +// Run TypedExpr normalization before building the memo. 
func TestIndexConstraints(t *testing.T) { defer leaktest.AfterTest(t)() @@ -254,7 +253,8 @@ func BenchmarkIndexConstraints(b *testing.B) { // parseIndexColumns parses descriptions of index columns; each // string corresponds to an index column and is of the form: -// @id [ascending|asc|descending|desc] [not null] +// +// @id [ascending|asc|descending|desc] [not null] func parseIndexColumns(tb testing.TB, md *opt.Metadata, colStrs []string) []opt.OrderingColumn { findCol := func(alias string) opt.ColumnID { for i := 0; i < md.NumColumns(); i++ { diff --git a/pkg/sql/opt/indexrec/candidate.go b/pkg/sql/opt/indexrec/candidate.go index d393f672571a..8d0a59de142f 100644 --- a/pkg/sql/opt/indexrec/candidate.go +++ b/pkg/sql/opt/indexrec/candidate.go @@ -23,13 +23,13 @@ import ( // referenced in a query. The index candidates are constructed based on the // following rules: // -// 1. Add a single index on all columns in a Group By or Order By expression if -// the columns are from the same table. Otherwise, group expressions into -// indexes by table. For Order By, the index column ordering and column +// 1. Add a single index on all columns in a Group By or Order By expression if +// the columns are from the same table. Otherwise, group expressions into +// indexes by table. For Order By, the index column ordering and column // directions are the same as how it is in the Order By. // 2. Add a single-column index on any Range expression, comparison // expression (=, <, >, <=, >=), and IS expression. -// 3. Add a single-column index on any column that appears in a JOIN predicate. +// 3. Add a single-column index on any column that appears in a JOIN predicate. // 4. If there exist multiple columns from the same table in a JOIN predicate, // create a single index on all such columns. // 5. Construct three groups for each table: EQ, R, and J. @@ -46,6 +46,7 @@ import ( // 7. For JSON and array columns, we create single column inverted indexes. We // also create the following multi-column combination candidates for each // inverted column: eq + 'inverted column', EQ + 'inverted column'. +// // TODO(nehageorge): Add a rule for columns that are referenced in the statement // but do not fall into one of these categories. In order to account for this, // *memo.VariableExpr would be the final case in the switch statement, hit only diff --git a/pkg/sql/opt/invertedidx/geo.go b/pkg/sql/opt/invertedidx/geo.go index d920d16007e8..b655be86bdb7 100644 --- a/pkg/sql/opt/invertedidx/geo.go +++ b/pkg/sql/opt/invertedidx/geo.go @@ -277,10 +277,10 @@ func (g *geoJoinPlanner) extractInvertedJoinConditionFromLeaf( // of the comparison operation. If commuteArgs is true, returns a new function // representing the same relationship but with commuted arguments. For example: // -// ST_Intersects(g1, g2) <-> ST_Intersects(g2, g1) -// ST_Covers(g1, g2) <-> ST_CoveredBy(g2, g1) -// g1 && g2 -> ST_Intersects(g2, g1) -// g1 ~ g2 -> ST_CoveredBy(g2, g1) +// ST_Intersects(g1, g2) <-> ST_Intersects(g2, g1) +// ST_Covers(g1, g2) <-> ST_CoveredBy(g2, g1) +// g1 && g2 -> ST_Intersects(g2, g1) +// g1 ~ g2 -> ST_CoveredBy(g2, g1) // // See geoindex.CommuteRelationshipMap for the full list of mappings. 
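As a side note on the commutation idea in the geo.go comment above, here is a minimal, self-contained sketch. The map below is a hypothetical stand-in for the real geoindex.CommuteRelationshipMap and covers only the three relationships named in the comment; it is illustration, not the optimizer's actual table.

package main

import "fmt"

// commuted maps a spatial relationship function to the function that
// expresses the same relationship with its two geometry arguments swapped.
// Toy table for illustration only; the optimizer's real mapping is
// geoindex.CommuteRelationshipMap.
var commuted = map[string]string{
	"ST_Intersects": "ST_Intersects", // symmetric relationship
	"ST_Covers":     "ST_CoveredBy",
	"ST_CoveredBy":  "ST_Covers",
}

func main() {
	// ST_Covers(g1, g2) holds exactly when ST_CoveredBy(g2, g1) holds.
	fmt.Printf("ST_Covers(g1, g2) <-> %s(g2, g1)\n", commuted["ST_Covers"])
}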
// diff --git a/pkg/sql/opt/invertedidx/inverted_index_expr.go b/pkg/sql/opt/invertedidx/inverted_index_expr.go index de981e5cdde1..24f28161d16e 100644 --- a/pkg/sql/opt/invertedidx/inverted_index_expr.go +++ b/pkg/sql/opt/invertedidx/inverted_index_expr.go @@ -60,11 +60,11 @@ func NewBoundPreFilterer(typ *types.T, expr tree.TypedExpr) (*PreFilterer, inter // derived, then TryFilterInvertedIndex returns ok=false. // // In addition to the inverted filter condition (spanExpr), returns: -// - a constraint of the prefix columns if there are any, -// - remaining filters that must be applied if the span expression is not tight, -// and -// - pre-filterer state that can be used by the invertedFilterer operator to -// reduce the number of false positives returned by the span expression. +// - a constraint of the prefix columns if there are any, +// - remaining filters that must be applied if the span expression is not tight, +// and +// - pre-filterer state that can be used by the invertedFilterer operator to +// reduce the number of false positives returned by the span expression. func TryFilterInvertedIndex( evalCtx *eval.Context, factory *norm.Factory, @@ -464,10 +464,10 @@ type invertedFilterPlanner interface { // delegated to the given invertedFilterPlanner. // // In addition to the inverted.Expression, returns: -// - remaining filters that must be applied if the inverted expression is not -// tight, and -// - pre-filterer state that can be used to reduce false positives. This is -// only non-nil if filterCond is a leaf condition (i.e., has no ANDs or ORs). +// - remaining filters that must be applied if the inverted expression is not +// tight, and +// - pre-filterer state that can be used to reduce false positives. This is +// only non-nil if filterCond is a leaf condition (i.e., has no ANDs or ORs). func extractInvertedFilterCondition( evalCtx *eval.Context, factory *norm.Factory, @@ -509,10 +509,9 @@ func extractInvertedFilterCondition( // isIndexColumn returns true if e is an expression that corresponds to an // inverted index column. The expression can be either: -// - a variable on the index column, or -// - an expression that matches the computed column expression (if the index -// column is computed). -// +// - a variable on the index column, or +// - an expression that matches the computed column expression (if the index +// column is computed). func isIndexColumn( tabID opt.TableID, index cat.Index, e opt.Expr, computedColumns map[opt.ColumnID]opt.ScalarExpr, ) bool { diff --git a/pkg/sql/opt/invertedidx/json_array.go b/pkg/sql/opt/invertedidx/json_array.go index 1bdf622a99d8..595ce194cecd 100644 --- a/pkg/sql/opt/invertedidx/json_array.go +++ b/pkg/sql/opt/invertedidx/json_array.go @@ -735,7 +735,9 @@ func buildFetchContainmentObjects( } // buildObject constructs a new JSON object of the form: -// {: ... {: {key0: }}} +// +// {: ... {: {key0: }}} +// // Where the keys and val are extracted from a fetch val expression by the // caller. Note that key0 is the outer-most fetch val index, so the expression // j->'a'->'b' = 1 results in {"a": {"b": 1}}. diff --git a/pkg/sql/opt/lookupjoin/constraint_builder.go b/pkg/sql/opt/lookupjoin/constraint_builder.go index 3ab4c0d56640..be3420c033b6 100644 --- a/pkg/sql/opt/lookupjoin/constraint_builder.go +++ b/pkg/sql/opt/lookupjoin/constraint_builder.go @@ -28,11 +28,11 @@ import ( // Constraint is used to constrain a lookup join. There are two types of // constraints: // -// 1. 
Constraints with KeyCols use columns from the input to directly -// constrain lookups into a target index. -// 2. Constraints with a LookupExpr build multiple spans from an expression -// that is evaluated for each input row. These spans are used to perform -// lookups into a target index. +// 1. Constraints with KeyCols use columns from the input to directly +// constrain lookups into a target index. +// 2. Constraints with a LookupExpr build multiple spans from an expression +// that is evaluated for each input row. These spans are used to perform +// lookups into a target index. // // A constraint is not constraining if both KeyCols and LookupExpr are empty. // See IsUnconstrained. @@ -397,49 +397,49 @@ func (b *ConstraintBuilder) Build( // ok=true when a join equality constraint can be generated for the column. This // is possible when: // -// 1. col is non-nullable. -// 2. col is a computed column. -// 3. Columns referenced in the computed expression are a subset of columns -// that already have equality constraints. +// 1. col is non-nullable. +// 2. col is a computed column. +// 3. Columns referenced in the computed expression are a subset of columns +// that already have equality constraints. // // For example, consider the table and query: // -// CREATE TABLE a ( -// a INT -// ) +// CREATE TABLE a ( +// a INT +// ) // -// CREATE TABLE bc ( -// b INT, -// c INT NOT NULL AS (b + 1) STORED -// ) +// CREATE TABLE bc ( +// b INT, +// c INT NOT NULL AS (b + 1) STORED +// ) // -// SELECT * FROM a JOIN b ON a = b +// SELECT * FROM a JOIN b ON a = b // // We can add an equality constraint for c because c is a function of b and b // has an equality constraint in the join predicate: // -// SELECT * FROM a JOIN b ON a = b AND a + 1 = c +// SELECT * FROM a JOIN b ON a = b AND a + 1 = c // // Condition (1) is required to prevent generating invalid equality constraints // for computed column expressions that can evaluate to NULL even when the // columns referenced in the expression are non-NULL. For example, consider the // table and query: // -// CREATE TABLE a ( -// a INT -// ) +// CREATE TABLE a ( +// a INT +// ) // -// CREATE TABLE bc ( -// b INT, -// c INT AS (CASE WHEN b > 0 THEN NULL ELSE -1 END) STORED -// ) +// CREATE TABLE bc ( +// b INT, +// c INT AS (CASE WHEN b > 0 THEN NULL ELSE -1 END) STORED +// ) // -// SELECT a, b FROM a JOIN b ON a = b +// SELECT a, b FROM a JOIN b ON a = b // // The following is an invalid transformation: a row such as (a=1, b=1) would no // longer be returned because NULL=NULL is false. // -// SELECT a, b FROM a JOIN b ON a = b AND (CASE WHEN a > 0 THEN NULL ELSE -1 END) = c +// SELECT a, b FROM a JOIN b ON a = b AND (CASE WHEN a > 0 THEN NULL ELSE -1 END) = c // // TODO(mgartner): We can relax condition (1) to allow nullable columns if it // can be proven that the expression will never evaluate to NULL. We can use diff --git a/pkg/sql/opt/lookupjoin/constraint_builder_test.go b/pkg/sql/opt/lookupjoin/constraint_builder_test.go index aa210c05efc9..22932738a061 100644 --- a/pkg/sql/opt/lookupjoin/constraint_builder_test.go +++ b/pkg/sql/opt/lookupjoin/constraint_builder_test.go @@ -45,14 +45,13 @@ import ( // // Information about the left columns. -// - right=( [not null] [as [stored|virtual], ...) +// - right=( [not null] [as [stored|virtual], ...) // -// Information about the left columns. -// -// - index=( [asc|desc], ...) +// Information about the left columns. // -// Information for the index on the right table. +// - index=( [asc|desc], ...) 
// +// Information for the index on the right table. func TestLookupConstraints(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/opt/memo/constraint_builder.go b/pkg/sql/opt/memo/constraint_builder.go index e5515bdb5aa1..0f4f6bfc2782 100644 --- a/pkg/sql/opt/memo/constraint_builder.go +++ b/pkg/sql/opt/memo/constraint_builder.go @@ -227,7 +227,7 @@ func (cb *constraintsBuilder) buildSingleColumnConstraintConst( // buildConstraintForTupleIn handles the case where we have a tuple IN another // tuple, for instance: // -// (a, b, c) IN ((1, 2, 3), (4, 5, 6)) +// (a, b, c) IN ((1, 2, 3), (4, 5, 6)) // // This function is a less powerful version of makeSpansForTupleIn, since it // does not operate on a particular index. The return value indicates diff --git a/pkg/sql/opt/memo/expr.go b/pkg/sql/opt/memo/expr.go index 7d76fd3e9e19..d3bf34201c62 100644 --- a/pkg/sql/opt/memo/expr.go +++ b/pkg/sql/opt/memo/expr.go @@ -38,12 +38,12 @@ import ( // in the same memo group are linked together in a list that can be traversed // via calls to FirstExpr and NextExpr: // -// +--------------------------------------+ -// | +---------------+ | -// | | |FirstExpr |FirstExpr -// v v | | -// member #1 -------> member #2 --------> member #3 -------> nil -// NextExpr NextExpr NextExpr +// +--------------------------------------+ +// | +---------------+ | +// | | |FirstExpr |FirstExpr +// v v | | +// member #1 -------> member #2 --------> member #3 -------> nil +// NextExpr NextExpr NextExpr // // A relational expression's physical properties and cost are defined once it // has been optimized. @@ -155,8 +155,7 @@ var CountRowsSingleton = &CountRowsExpr{} // TrueFilter is a global instance of the empty FiltersExpr, used in situations // where the filter should always evaluate to true: // -// SELECT * FROM a INNER JOIN b ON True -// +// SELECT * FROM a INNER JOIN b ON True var TrueFilter = FiltersExpr{} // EmptyTuple is a global instance of a TupleExpr that contains no elements. @@ -167,8 +166,7 @@ var EmptyTuple = &TupleExpr{Typ: types.EmptyTuple} // a TupleExpr that contains no elements. It's used when constructing an empty // ValuesExpr: // -// SELECT 1 -// +// SELECT 1 var ScalarListWithEmptyTuple = ScalarListExpr{EmptyTuple} // EmptyGroupingPrivate is a global instance of a GroupingPrivate that has no diff --git a/pkg/sql/opt/memo/expr_format.go b/pkg/sql/opt/memo/expr_format.go index e63853a89f57..be1fe768536b 100644 --- a/pkg/sql/opt/memo/expr_format.go +++ b/pkg/sql/opt/memo/expr_format.go @@ -55,15 +55,17 @@ type ExprFmtFlags int // ) // In our example here, 1 means the flag is on and 0 means the flag is off. -//const ( -// ExprFmtShowAll int = 0 // iota is 0, but it's not used 0000 0000 -// ExprFmtHideMiscProps int = 1 << (iota - 1) -// // iota is 1, 1 << (1 - 1) 0000 0001 = 1 -// ExprFmtHideConstraints // iota is 2, 1 << (2 - 1) 0000 0010 = 2 -// ExprFmtHideFuncDeps // iota is 3, 1 << (3 - 1) 0000 0100 = 4 -// ... -// ExprFmtHideAll // (1 << iota) - 1 -//) +// const ( +// +// ExprFmtShowAll int = 0 // iota is 0, but it's not used 0000 0000 +// ExprFmtHideMiscProps int = 1 << (iota - 1) +// // iota is 1, 1 << (1 - 1) 0000 0001 = 1 +// ExprFmtHideConstraints // iota is 2, 1 << (2 - 1) 0000 0010 = 2 +// ExprFmtHideFuncDeps // iota is 3, 1 << (3 - 1) 0000 0100 = 4 +// ... +// ExprFmtHideAll // (1 << iota) - 1 +// +// ) // If we want to set ExprFmtHideMiscProps and ExprFmtHideConstraints on, we // would have f := ExprFmtHideMiscProps | ExprFmtHideConstraints 0000 0011. 
// ExprFmtShowAll has all 0000 0000. This is because all flags represent when @@ -1064,7 +1066,8 @@ func (f *ExprFmtCtx) formatScalarWithLabel( // scalarPropsStrings returns a slice of strings, each describing a property; // for example: -// {"type=bool", "outer=(1)", "constraints=(/1: [/1 - /1]; tight)"} +// +// {"type=bool", "outer=(1)", "constraints=(/1: [/1 - /1]; tight)"} func (f *ExprFmtCtx) scalarPropsStrings(scalar opt.ScalarExpr) []string { typ := scalar.DataType() if typ == nil { @@ -1120,7 +1123,8 @@ func (f *ExprFmtCtx) scalarPropsStrings(scalar opt.ScalarExpr) []string { // FormatScalarProps writes out a string representation of the scalar // properties (with a preceding space); for example: -// " [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight)]" +// +// " [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight)]" func (f *ExprFmtCtx) FormatScalarProps(scalar opt.ScalarExpr) { props := f.scalarPropsStrings(scalar) if len(props) != 0 { @@ -1218,7 +1222,7 @@ func (f *ExprFmtCtx) formatScalarPrivate(scalar opt.ScalarExpr) { // formatIndex outputs the specified index into the context's buffer with the // format: // -// table_alias@index_name +// table_alias@index_name // // If reverse is true, ",rev" is appended. // @@ -1370,8 +1374,7 @@ func (f *ExprFmtCtx) formatOptionalColList( // given list. Each child shows how the column will be mutated, with the id of // the "before" and "after" columns, similar to this: // -// a:1 => x:4 -// +// a:1 => x:4 func (f *ExprFmtCtx) formatMutationCols( nd RelExpr, tp treeprinter.Node, heading string, colList opt.OptionalColList, tabID opt.TableID, ) { @@ -1410,7 +1413,8 @@ func (f *ExprFmtCtx) ColumnString(id opt.ColumnID) string { // formatColSimple outputs the specified column into the context's buffer using the // following format: -// label:id +// +// label:id // // The :id part is omitted if the formatting flags include ExprFmtHideColumns. // @@ -1446,10 +1450,12 @@ func (f *ExprFmtCtx) formatColSimpleToBuffer(buf *bytes.Buffer, label string, id // formatCol outputs the specified column into the context's buffer using the // following format: -// label:id(type) +// +// label:id(type) // // If the column is not nullable, then this is the format: -// label:id(type!null) +// +// label:id(type!null) // // Some of the components can be omitted depending on formatting flags. // diff --git a/pkg/sql/opt/memo/expr_name_gen.go b/pkg/sql/opt/memo/expr_name_gen.go index 10ac95e671c9..a88ba6aee788 100644 --- a/pkg/sql/opt/memo/expr_name_gen.go +++ b/pkg/sql/opt/memo/expr_name_gen.go @@ -48,7 +48,6 @@ func NewExprNameGenerator(prefix string) *ExprNameGenerator { // was encountered during tree traversal. Thus, in order to generate a // consistent name, always call GenerateName in a pre-order traversal of the // expression tree. -// func (g *ExprNameGenerator) GenerateName(op opt.Operator) string { // Replace all instances of "-" in the operator name with "_" in order to // create a legal table name. diff --git a/pkg/sql/opt/memo/expr_test.go b/pkg/sql/opt/memo/expr_test.go index 0d3d39e2daf7..06269979f4a0 100644 --- a/pkg/sql/opt/memo/expr_test.go +++ b/pkg/sql/opt/memo/expr_test.go @@ -34,24 +34,24 @@ import ( ) // TestExprIsNeverNull runs data-driven testcases of the form -// []... -// -// ---- -// +// +// []... +// +// ---- +// // // See OptTester.Handle for supported commands. 
In addition to those, we // support: // -// - scalar-is-not-nullable [args] -// -// Builds a scalar expression using the input and performs a best-effort -// check to see if the scalar expression is nullable. It outputs this -// result as a boolean. +// - scalar-is-not-nullable [args] // -// The supported args (in addition to the ones supported by OptTester): +// Builds a scalar expression using the input and performs a best-effort +// check to see if the scalar expression is nullable. It outputs this +// result as a boolean. // -// - vars=(var1 type1 [not null], var2 type2 [not null],...) +// The supported args (in addition to the ones supported by OptTester): // +// - vars=(var1 type1 [not null], var2 type2 [not null],...) func TestExprIsNeverNull(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/opt/memo/extract.go b/pkg/sql/opt/memo/extract.go index 82b5737c8c35..e773c2592ee1 100644 --- a/pkg/sql/opt/memo/extract.go +++ b/pkg/sql/opt/memo/extract.go @@ -58,8 +58,8 @@ func CanExtractConstDatum(e opt.Expr) bool { // ExtractConstDatum returns the Datum that represents the value of an // expression with a constant value. An expression with a constant value is: -// - one that has a ConstValue tag, or -// - a tuple or array where all children are constant values. +// - one that has a ConstValue tag, or +// - a tuple or array where all children are constant values. func ExtractConstDatum(e opt.Expr) tree.Datum { switch t := e.(type) { case *NullExpr: diff --git a/pkg/sql/opt/memo/interner.go b/pkg/sql/opt/memo/interner.go index 5198c0812073..676e667ee2ca 100644 --- a/pkg/sql/opt/memo/interner.go +++ b/pkg/sql/opt/memo/interner.go @@ -71,20 +71,20 @@ type internHash uint64 // The non-cryptographic hash function is adapted from fnv.go in Golang's // standard library. That in turn was taken from FNV-1a, described here: // -// https://en.wikipedia.org/wiki/Fowler-Noll-Vo_hash_function +// https://en.wikipedia.org/wiki/Fowler-Noll-Vo_hash_function // // Each expression type follows the same interning pattern: // -// 1. Compute an int64 hash value for the expression using FNV-1a. -// 2. Do a fast 64-bit Go map lookup to determine if an expression with the -// same hash is already in the cache. -// 3. If so, then test whether the existing expression is equivalent, since -// there may be a hash value collision. -// 4. Expressions with colliding hash values are linked together in a list. -// Rather than use an explicit linked list data structure, colliding -// entries are rehashed using a randomly generated hash value that is -// stored in the existing entry. This effectively uses the Go map as if it -// were a hash table of size 2^64. +// 1. Compute an int64 hash value for the expression using FNV-1a. +// 2. Do a fast 64-bit Go map lookup to determine if an expression with the +// same hash is already in the cache. +// 3. If so, then test whether the existing expression is equivalent, since +// there may be a hash value collision. +// 4. Expressions with colliding hash values are linked together in a list. +// Rather than use an explicit linked list data structure, colliding +// entries are rehashed using a randomly generated hash value that is +// stored in the existing entry. This effectively uses the Go map as if it +// were a hash table of size 2^64. 
// // This pattern enables very low overhead hashing of expressions - the // allocation of a Go map with a fast 64-bit key, plus a couple of reusable @@ -138,14 +138,14 @@ func (in *interner) InternPhysicalProps(val *physical.Required) *physical.Requir // internCache is a helper class that implements the interning pattern described // in the comment for the interner struct. Here is a usage example: // -// var cache internCache -// cache.Start(hash) -// for cache.Next() { -// if isEqual(cache.Item(), other) { -// // Found existing item in cache. -// } -// } -// cache.Add(other) +// var cache internCache +// cache.Start(hash) +// for cache.Next() { +// if isEqual(cache.Item(), other) { +// // Found existing item in cache. +// } +// } +// cache.Add(other) // // The calls to the Next method iterate over any entries with the same hash, // until either a match is found or it is proven their are no matches, in which diff --git a/pkg/sql/opt/memo/logical_props_builder.go b/pkg/sql/opt/memo/logical_props_builder.go index 51a94759a0af..192388fb8de6 100644 --- a/pkg/sql/opt/memo/logical_props_builder.go +++ b/pkg/sql/opt/memo/logical_props_builder.go @@ -35,10 +35,13 @@ var fdAnnID = opt.NewTableAnnID() // buildProps is called by the memo group construction code in order to // initialize the new group's logical properties. // NOTE: When deriving properties from children, be sure to keep the child -// properties immutable by copying them if necessary. +// +// properties immutable by copying them if necessary. +// // NOTE: The parent expression is passed as an expression for convenient access -// to children, but certain properties on it are not yet defined (like -// its logical properties!). +// +// to children, but certain properties on it are not yet defined (like +// its logical properties!). type logicalPropsBuilder struct { evalCtx *eval.Context mem *Memo @@ -2713,14 +2716,15 @@ func deriveWithUses(r opt.Expr) props.WithUsesMap { // DECIMAL. // // A formal definition: -// Let (c1,c2,...) be the outer columns of the scalar expression. Let -// f(x1,x2,..) be the result of the scalar expression for the given outer -// column values. The expression is composite insensitive if, for any two -// sets of values (x1,x2,...) and (y1,y2,...) -// (x1=y1 AND x2=y2 AND ...) => f(x1,x2,...) = f(y1,y2,...) // -// Note that this doesn't mean that the final results are always *identical* -// just that they are logically equal. +// Let (c1,c2,...) be the outer columns of the scalar expression. Let +// f(x1,x2,..) be the result of the scalar expression for the given outer +// column values. The expression is composite insensitive if, for any two +// sets of values (x1,x2,...) and (y1,y2,...) +// (x1=y1 AND x2=y2 AND ...) => f(x1,x2,...) = f(y1,y2,...) +// +// Note that this doesn't mean that the final results are always *identical* +// just that they are logically equal. // // This property is used to determine when a scalar expression can be copied, // with outer column variable references changed to refer to other columns that diff --git a/pkg/sql/opt/memo/memo.go b/pkg/sql/opt/memo/memo.go index 18d061ee76ca..34daa8ca9f9f 100644 --- a/pkg/sql/opt/memo/memo.go +++ b/pkg/sql/opt/memo/memo.go @@ -29,10 +29,10 @@ import ( // called groups where each group contains a set of logically equivalent // expressions. Two expressions are considered logically equivalent if: // -// 1. They return the same number and data type of columns. However, order and -// naming of columns doesn't matter. -// 2. 
They return the same number of rows, with the same values in each row. -// However, order of rows doesn't matter. +// 1. They return the same number and data type of columns. However, order and +// naming of columns doesn't matter. +// 2. They return the same number of rows, with the same values in each row. +// However, order of rows doesn't matter. // // The different expressions in a single group are called memo expressions // (memo-ized expressions). The children of a memo expression can themselves be @@ -72,17 +72,17 @@ import ( // in-memory instance. This allows interned expressions to be checked for // equivalence by simple pointer comparison. For example: // -// SELECT * FROM a, b WHERE a.x = b.x +// SELECT * FROM a, b WHERE a.x = b.x // // After insertion into the memo, the memo would contain these six groups, with // numbers substituted for pointers to the normalized expression in each group: // -// G6: [inner-join [G1 G2 G5]] -// G5: [eq [G3 G4]] -// G4: [variable b.x] -// G3: [variable a.x] -// G2: [scan b] -// G1: [scan a] +// G6: [inner-join [G1 G2 G5]] +// G5: [eq [G3 G4]] +// G4: [variable b.x] +// G3: [variable a.x] +// G2: [scan b] +// G1: [scan a] // // Each leaf expressions is interned by hashing its operator type and any // private field values. Expressions higher in the tree can then rely on the @@ -98,12 +98,12 @@ import ( // added by the factory. For example, the join commutativity transformation // expands the memo like this: // -// G6: [inner-join [G1 G2 G5]] [inner-join [G2 G1 G5]] -// G5: [eq [G3 G4]] -// G4: [variable b.x] -// G3: [variable a.x] -// G2: [scan b] -// G1: [scan a] +// G6: [inner-join [G1 G2 G5]] [inner-join [G2 G1 G5]] +// G5: [eq [G3 G4]] +// G4: [variable b.x] +// G3: [variable a.x] +// G2: [scan b] +// G1: [scan a] // // See the comments in explorer.go for more details. type Memo struct { @@ -306,14 +306,14 @@ func (m *Memo) HasPlaceholders() bool { // that takes into account the changes. IsStale checks the following // dependencies: // -// 1. Current database: this can change name resolution. -// 2. Current search path: this can change name resolution. -// 3. Current location: this determines time zone, and can change how time- -// related types are constructed and compared. -// 4. Data source schema: this determines most aspects of how the query is -// compiled. -// 5. Data source privileges: current user may no longer have access to one or -// more data sources. +// 1. Current database: this can change name resolution. +// 2. Current search path: this can change name resolution. +// 3. Current location: this determines time zone, and can change how time- +// related types are constructed and compared. +// 4. Data source schema: this determines most aspects of how the query is +// compiled. +// 5. Data source privileges: current user may no longer have access to one or +// more data sources. // // This function cannot swallow errors and return only a boolean, as it may // perform KV operations on behalf of the transaction associated with the diff --git a/pkg/sql/opt/memo/memo_test.go b/pkg/sql/opt/memo/memo_test.go index 4644eeab58ac..5b3f206db0eb 100644 --- a/pkg/sql/opt/memo/memo_test.go +++ b/pkg/sql/opt/memo/memo_test.go @@ -432,10 +432,11 @@ func traverseExpr(expr memo.RelExpr, f func(memo.RelExpr)) { } // runDataDrivenTest runs data-driven testcases of the form -// -// -// ---- -// +// +// +// +// ---- +// // // See OptTester.Handle for supported commands. 
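To make the data-driven testcase shape above concrete, here is a minimal sketch driven by the cockroachdb/datadriven package directly (OptTester layers its own commands on top of this mechanism). The "echo" command and the testdata path are hypothetical, and the callback shape is assumed to match datadriven.RunTest.

package memo_test

import (
	"strings"
	"testing"

	"github.com/cockroachdb/datadriven"
)

// TestEchoDataDriven shows the testcase format: a command line, an input
// block, a "----" separator, and the expected output. The callback returns
// the actual output, which the framework compares against the expectation.
func TestEchoDataDriven(t *testing.T) {
	datadriven.RunTest(t, "testdata/echo", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "echo":
			// A real handler would build a memo from d.Input and format it.
			return strings.TrimSpace(d.Input) + "\n"
		default:
			t.Fatalf("unknown command: %s", d.Cmd)
			return ""
		}
	})
}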
func runDataDrivenTest(t *testing.T, path string, fmtFlags memo.ExprFmtFlags) { diff --git a/pkg/sql/opt/memo/multiplicity_builder.go b/pkg/sql/opt/memo/multiplicity_builder.go index 6cff5d405e2e..3c1dfaf319c2 100644 --- a/pkg/sql/opt/memo/multiplicity_builder.go +++ b/pkg/sql/opt/memo/multiplicity_builder.go @@ -172,33 +172,33 @@ func getJoinLeftMultiplicityVal(left, right RelExpr, filters FiltersExpr) props. // // Why is condition #2 sufficient to ensure that no left rows are matched more // than once? -// * It implies that left columns are being equated with a lax key from the -// right input. -// * A lax key means that the right rows being equated are unique apart from -// nulls. -// * Equalities are null-rejecting and the right rows are otherwise unique, so -// no left row can be equal to more than one right row on the filters. -// * Therefore, no left row will be matched more than once. +// - It implies that left columns are being equated with a lax key from the +// right input. +// - A lax key means that the right rows being equated are unique apart from +// nulls. +// - Equalities are null-rejecting and the right rows are otherwise unique, so +// no left row can be equal to more than one right row on the filters. +// - Therefore, no left row will be matched more than once. // // As an example: // -// CREATE TABLE x_tab (x INT); -// CREATE TABLE a_tab (a INT UNIQUE); +// CREATE TABLE x_tab (x INT); +// CREATE TABLE a_tab (a INT UNIQUE); // -// x a -// ---- ---- -// NULL NULL -// 1 1 -// 1 2 -// 2 3 +// x a +// ---- ---- +// NULL NULL +// 1 1 +// 1 2 +// 2 3 // -// SELECT * FROM x_tab INNER JOIN a_tab ON x = a; -// => -// x a -// --- -// 1 1 -// 1 1 -// 2 2 +// SELECT * FROM x_tab INNER JOIN a_tab ON x = a; +// => +// x a +// --- +// 1 1 +// 1 1 +// 2 2 // // In this example, no rows from x are duplicated, while the '1' row from a is // duplicated. @@ -219,30 +219,30 @@ func filtersMatchLeftRowsAtMostOnce(left, right RelExpr, filters FiltersExpr) bo // according to the join filters. This is true when the following conditions are // satisfied: // -// 1. If this is a cross join (there are no filters), then either: -// a. The minimum cardinality of the right input is greater than zero. There -// must be at least one right row for the left rows to be preserved. -// b. There is a not-null foreign key column in the left input that references -// an unfiltered column from the right input. +// 1. If this is a cross join (there are no filters), then either: +// a. The minimum cardinality of the right input is greater than zero. There +// must be at least one right row for the left rows to be preserved. +// b. There is a not-null foreign key column in the left input that references +// an unfiltered column from the right input. // -// 2. If this is not a cross join, every filter is an equality that falls under -// one of these two cases: -// a. The self-join case: all equalities are between ColumnIDs that come from -// the same column on the same base table. -// b. The foreign-key case: all equalities are between a foreign key column on -// the left and the column it references from the right. All left columns -// must come from the same foreign key. +// 2. If this is not a cross join, every filter is an equality that falls under +// one of these two cases: +// a. The self-join case: all equalities are between ColumnIDs that come from +// the same column on the same base table. +// b. 
The foreign-key case: all equalities are between a foreign key column on +// the left and the column it references from the right. All left columns +// must come from the same foreign key. // // In both the self-join and the foreign key cases that are not cross-joins // (cases 2a and 2b): // // - The left columns must be not-null, and // - One of the following must be true: -// - The right columns are unfiltered, or -// - The left and right side of the join must be Select expressions where -// the left side filters imply the right side filters, and the right -// columns - are unfiltered in the right Select's input (see condition -// #3b in the comment for verifyFilterAreValidEqualities). +// - The right columns are unfiltered, or +// - The left and right side of the join must be Select expressions where +// the left side filters imply the right side filters, and the right +// columns - are unfiltered in the right Select's input (see condition +// #3b in the comment for verifyFilterAreValidEqualities). // // Why do the left columns have to be non-null, and the right columns unfiltered // or filtered identically as their corresponding left column? In both the @@ -297,17 +297,17 @@ func filtersMatchAllLeftRows(left, right RelExpr, filters FiltersExpr) bool { // verifyFiltersAreValidEqualities returns the set of equality columns in the // right relation and true when all the following conditions are satisfied: // -// 1. All filters are equalities. -// 2. All equalities directly compare two columns. -// 3. All equalities x=y (or y=x) have x as a left non-null column and y as a -// right column, and either: -// a. y is an unfiltered column in the right expression, or -// b. both the left and right expressions are Selects; the left side -// filters imply the right side filters when replacing x with y; and y -// is an unfiltered column in the right Select's input. -// 4. All equality columns come from a base table. -// 5. All left columns come from a single table, and all right columns come -// from a single table. +// 1. All filters are equalities. +// 2. All equalities directly compare two columns. +// 3. All equalities x=y (or y=x) have x as a left non-null column and y as a +// right column, and either: +// a. y is an unfiltered column in the right expression, or +// b. both the left and right expressions are Selects; the left side +// filters imply the right side filters when replacing x with y; and y +// is an unfiltered column in the right Select's input. +// 4. All equality columns come from a base table. +// 5. All left columns come from a single table, and all right columns come +// from a single table. // // Returns ok=false if any of these conditions are unsatisfied. func verifyFiltersAreValidEqualities( @@ -380,11 +380,11 @@ func verifyFiltersAreValidEqualities( // rightHasSingleFilterThatMatchesLeft returns true if: // -// 1. Both left and right are Select expressions. -// 2. rightCol is unfiltered in right's input. -// 3. The left Select has a filter in the form leftCol=const. -// 4. The right Select has a single filter in the form rightCol=const where -// the const value is the same as the const value in (2). +// 1. Both left and right are Select expressions. +// 2. rightCol is unfiltered in right's input. +// 3. The left Select has a filter in the form leftCol=const. +// 4. The right Select has a single filter in the form rightCol=const where +// the const value is the same as the const value in (2). 
// // This function is used by verifyFiltersAreValidEqualities to try to prove that // every row in the left input of a join will have a match in the right input diff --git a/pkg/sql/opt/memo/statistics_builder.go b/pkg/sql/opt/memo/statistics_builder.go index c62e307c931a..b5e31d46ab1d 100644 --- a/pkg/sql/opt/memo/statistics_builder.go +++ b/pkg/sql/opt/memo/statistics_builder.go @@ -131,13 +131,13 @@ const ( // For example, here is a query plan with corresponding estimated statistics at // each level: // -// Query: SELECT y FROM a WHERE x=1 +// Query: SELECT y FROM a WHERE x=1 // -// Plan: Project y Row Count: 10, Distinct(x): 1 -// | -// Select x=1 Row Count: 10, Distinct(x): 1 -// | -// Scan a Row Count: 100, Distinct(x): 10 +// Plan: Project y Row Count: 10, Distinct(x): 1 +// | +// Select x=1 Row Count: 10, Distinct(x): 1 +// | +// Scan a Row Count: 100, Distinct(x): 10 // // The statistics for the Scan operator were presumably retrieved from the // underlying table statistics cached in the metadata. The statistics for @@ -188,7 +188,7 @@ const ( // To better understand how the statisticsBuilder works, let us consider this // simple query, which consists of a scan followed by an aggregation: // -// SELECT count(*), x, y FROM t GROUP BY x, y +// SELECT count(*), x, y FROM t GROUP BY x, y // // The statistics for the scan of t will be calculated first, since logical // properties are built bottom-up. The estimated row count is retrieved from @@ -206,25 +206,25 @@ const ( // that no statistics are cached, this is the order of function calls for the // above example (somewhat simplified): // -// +-------------+ +--------------+ -// 1. | buildScan t | 2. | buildGroupBy | -// +-------------+ +--------------+ -// | | -// +-----------------------+ +-------------------------+ -// | makeTableStatistics t | | colStatFromChild (x, y) | -// +-----------------------+ +-------------------------+ -// | -// +--------------------+ -// | colStatScan (x, y) | -// +--------------------+ -// | -// +---------------------+ -// | colStatTable (x, y) | -// +---------------------+ -// | -// +--------------------+ -// | colStatLeaf (x, y) | -// +--------------------+ +// +-------------+ +--------------+ +// 1. | buildScan t | 2. | buildGroupBy | +// +-------------+ +--------------+ +// | | +// +-----------------------+ +-------------------------+ +// | makeTableStatistics t | | colStatFromChild (x, y) | +// +-----------------------+ +-------------------------+ +// | +// +--------------------+ +// | colStatScan (x, y) | +// +--------------------+ +// | +// +---------------------+ +// | colStatTable (x, y) | +// +---------------------+ +// | +// +--------------------+ +// | colStatLeaf (x, y) | +// +--------------------+ // // See props/statistics.go for more details. type statisticsBuilder struct { @@ -3411,12 +3411,12 @@ func (sb *statisticsBuilder) applyConstraintSet( // columns is done in the logical props builder. // // For example, consider the following constraint sets: -// /a/b/c: [/1 - /1/2/3] [/1/2/5 - /1/2/8] -// /c: [/6 - /6] +// +// /a/b/c: [/1 - /1/2/3] [/1/2/5 - /1/2/8] +// /c: [/6 - /6] // // The first constraint set filters nulls out of column a, and the // second constraint set filters nulls out of column c. 
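To make the null-count update described above concrete, here is a tiny sketch with a hypothetical per-column stats map keyed by column ID; the real code operates on props.Statistics and an opt.ColSet of not-null columns rather than these stand-in types.

package main

import "fmt"

// colStat is a stripped-down stand-in for a per-column statistic; only the
// field needed for the illustration is kept.
type colStat struct {
	NullCount float64
}

// zeroNullCounts mimics the effect described in the comment: for every column
// known to be not-null after applying the constraints, the estimated null
// count drops to zero. The int keys play the role of column IDs.
func zeroNullCounts(stats map[int]*colStat, notNullCols []int) {
	for _, col := range notNullCols {
		if s, ok := stats[col]; ok {
			s.NullCount = 0
		}
	}
}

func main() {
	stats := map[int]*colStat{1: {NullCount: 10}, 3: {NullCount: 4}}
	// /a/b/c: [/1 - /1/2/3] ... filters nulls from column a (ID 1);
	// /c: [/6 - /6] filters nulls from column c (ID 3).
	zeroNullCounts(stats, []int{1, 3})
	fmt.Println(stats[1].NullCount, stats[3].NullCount) // 0 0
}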
-// func (sb *statisticsBuilder) updateNullCountsFromNotNullCols( notNullCols opt.ColSet, s *props.Statistics, ) { @@ -3438,20 +3438,22 @@ func (sb *statisticsBuilder) updateNullCountsFromNotNullCols( // // For example, consider the following constraint set: // -// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/2/8] -// /c: [/6 - /6] +// /a/b/c: [/1/2/3 - /1/2/3] [/1/2/5 - /1/2/8] +// /c: [/6 - /6] // // After the first constraint is processed, s.ColStats contains the // following: -// [a] -> { ... DistinctCount: 1 ... } -// [b] -> { ... DistinctCount: 1 ... } -// [c] -> { ... DistinctCount: 5 ... } +// +// [a] -> { ... DistinctCount: 1 ... } +// [b] -> { ... DistinctCount: 1 ... } +// [c] -> { ... DistinctCount: 5 ... } // // After the second constraint is processed, column c is further constrained, // so s.ColStats contains the following: -// [a] -> { ... DistinctCount: 1 ... } -// [b] -> { ... DistinctCount: 1 ... } -// [c] -> { ... DistinctCount: 1 ... } +// +// [a] -> { ... DistinctCount: 1 ... } +// [b] -> { ... DistinctCount: 1 ... } +// [c] -> { ... DistinctCount: 1 ... } // // Note that updateDistinctCountsFromConstraint is pessimistic, and assumes // that there is at least one row for every possible value provided by the @@ -3464,7 +3466,7 @@ func (sb *statisticsBuilder) updateNullCountsFromNotNullCols( // query specifically mentions some exact values we should use that as a hint. // For example, consider the following constraint: // -// /a: [ - 5][10 - 10][15 - 15] +// /a: [ - 5][10 - 10][15 - 15] // // In this case, updateDistinctCountsFromConstraint will infer that there // are at least two distinct values (10 and 15). This lower bound will be @@ -3666,63 +3668,65 @@ func (sb *statisticsBuilder) updateDistinctNullCountsFromEquivalency( // or (2) by assuming they are correlated. // // (1) Assuming independence between columns, we can calculate the selectivity -// by taking the product of selectivities of each constrained column. In -// the general case, this can be represented by the formula: // -// ┬-┬ ⎛ new_distinct(i) ⎞ -// selectivity = │ │ ⎜ --------------- ⎟ -// ┴ ┴ ⎝ old_distinct(i) ⎠ -// i in -// {constrained -// columns} +// by taking the product of selectivities of each constrained column. In +// the general case, this can be represented by the formula: +// +// ┬-┬ ⎛ new_distinct(i) ⎞ +// selectivity = │ │ ⎜ --------------- ⎟ +// ┴ ┴ ⎝ old_distinct(i) ⎠ +// i in +// {constrained +// columns} // // (2) If we instead assume there is some correlation between columns, we -// calculate the selectivity using multi-column statistics. // -// ⎛ new_distinct({constrained columns}) ⎞ -// selectivity = ⎜ ----------------------------------- ⎟ -// ⎝ old_distinct({constrained columns}) ⎠ +// calculate the selectivity using multi-column statistics. // -// This formula looks simple, but the challenge is that it is difficult -// to determine the correct value for new_distinct({constrained columns}) -// if each column is not constrained to a single value. For example, if -// new_distinct(x)=2 and new_distinct(y)=2, new_distinct({x,y}) could be 2, -// 3 or 4. 
We estimate the new distinct count as follows, using the concept -// of "soft functional dependency (FD) strength" as defined in [1]: +// ⎛ new_distinct({constrained columns}) ⎞ +// selectivity = ⎜ ----------------------------------- ⎟ +// ⎝ old_distinct({constrained columns}) ⎠ // -// new_distinct({x,y}) = min_value + range * (1 - FD_strength_scaled) +// This formula looks simple, but the challenge is that it is difficult +// to determine the correct value for new_distinct({constrained columns}) +// if each column is not constrained to a single value. For example, if +// new_distinct(x)=2 and new_distinct(y)=2, new_distinct({x,y}) could be 2, +// 3 or 4. We estimate the new distinct count as follows, using the concept +// of "soft functional dependency (FD) strength" as defined in [1]: // -// where +// new_distinct({x,y}) = min_value + range * (1 - FD_strength_scaled) // -// min_value = max(new_distinct(x), new_distinct(y)) -// max_value = new_distinct(x) * new_distinct(y) -// range = max_value - min_value +// where // -// ⎛ max(old_distinct(x),old_distinct(y)) ⎞ -// FD_strength = ⎜ ------------------------------------ ⎟ -// ⎝ old_distinct({x,y}) ⎠ +// min_value = max(new_distinct(x), new_distinct(y)) +// max_value = new_distinct(x) * new_distinct(y) +// range = max_value - min_value // -// ⎛ max(old_distinct(x), old_distinct(y)) ⎞ -// min_FD_strength = ⎜ ------------------------------------- ⎟ -// ⎝ old_distinct(x) * old_distinct(y) ⎠ +// ⎛ max(old_distinct(x),old_distinct(y)) ⎞ +// FD_strength = ⎜ ------------------------------------ ⎟ +// ⎝ old_distinct({x,y}) ⎠ // -// ⎛ FD_strength - min_FD_strength ⎞ // scales FD_strength -// FD_strength_scaled = ⎜ ----------------------------- ⎟ // to be between -// ⎝ 1 - min_FD_strength ⎠ // 0 and 1 +// ⎛ max(old_distinct(x), old_distinct(y)) ⎞ +// min_FD_strength = ⎜ ------------------------------------- ⎟ +// ⎝ old_distinct(x) * old_distinct(y) ⎠ // -// Suppose that old_distinct(x)=100 and old_distinct(y)=10. If x and y are -// perfectly correlated, old_distinct({x,y})=100. Using the example from -// above, new_distinct(x)=2 and new_distinct(y)=2. Plugging in the values -// into the equation, we get: +// ⎛ FD_strength - min_FD_strength ⎞ // scales FD_strength +// FD_strength_scaled = ⎜ ----------------------------- ⎟ // to be between +// ⎝ 1 - min_FD_strength ⎠ // 0 and 1 // -// FD_strength_scaled = 1 -// new_distinct({x,y}) = 2 + (4 - 2) * (1 - 1) = 2 +// Suppose that old_distinct(x)=100 and old_distinct(y)=10. If x and y are +// perfectly correlated, old_distinct({x,y})=100. Using the example from +// above, new_distinct(x)=2 and new_distinct(y)=2. Plugging in the values +// into the equation, we get: // -// If x and y are completely independent, however, old_distinct({x,y})=1000. -// In this case, we get: +// FD_strength_scaled = 1 +// new_distinct({x,y}) = 2 + (4 - 2) * (1 - 1) = 2 // -// FD_strength_scaled = 0 -// new_distinct({x,y}) = 2 + (4 - 2) * (1 - 0) = 4 +// If x and y are completely independent, however, old_distinct({x,y})=1000. +// In this case, we get: +// +// FD_strength_scaled = 0 +// new_distinct({x,y}) = 2 + (4 - 2) * (1 - 0) = 4 // // Note that even if we calculate the selectivity based on equation (2) above, // we still want to take equation (1) into account. This is because it is @@ -3732,7 +3736,7 @@ func (sb *statisticsBuilder) updateDistinctNullCountsFromEquivalency( // individually, we must give some weight to equation (1). 
Therefore, instead // of equation (2) we actually return the following selectivity: // -// selectivity = (1 - w) * (equation 1) + w * (equation 2) +// selectivity = (1 - w) * (equation 1) + w * (equation 2) // // where w is the constant multiColWeight. // @@ -3745,8 +3749,8 @@ func (sb *statisticsBuilder) updateDistinctNullCountsFromEquivalency( // correlation. // // [1] Ilyas, Ihab F., et al. "CORDS: automatic discovery of correlations and -// soft functional dependencies." SIGMOD 2004. // +// soft functional dependencies." SIGMOD 2004. func (sb *statisticsBuilder) selectivityFromMultiColDistinctCounts( cols opt.ColSet, e RelExpr, s *props.Statistics, ) (selectivity, selectivityUpperBound props.Selectivity) { @@ -3894,25 +3898,28 @@ func (sb *statisticsBuilder) selectivityFromMultiColDistinctCounts( // correlation coefficient between pairs of columns during table stats // collection. Instead, this is just a proxy obtained by estimating three values // for the selectivity of the filter constraining the given columns: -// 1. lb (lower bound): the value returned by multiplying the individual -// conjunct selectivities together, estimated from single-column distinct -// counts. This would be the selectivity of the entire predicate if the -// columns were completely independent. -// 2. ub (upper bound): the lowest single-conjunct selectivity estimated from -// single-column distinct counts. This would be the selectivity of the entire -// predicate if the columns were completely correlated. In other words, this -// would be the selectivity if the value of the column with the most -// selective predicate functionally determined the value of all other -// constrained columns. -// 3. mc (multi-column selectivity): the value returned by estimating the -// predicate selectivity with a combination of single-column and multi-column -// distinct counts. It falls somewhere between upper and lower bound, and -// thus approximates the level of correlation of the columns. +// 1. lb (lower bound): the value returned by multiplying the individual +// conjunct selectivities together, estimated from single-column distinct +// counts. This would be the selectivity of the entire predicate if the +// columns were completely independent. +// 2. ub (upper bound): the lowest single-conjunct selectivity estimated from +// single-column distinct counts. This would be the selectivity of the entire +// predicate if the columns were completely correlated. In other words, this +// would be the selectivity if the value of the column with the most +// selective predicate functionally determined the value of all other +// constrained columns. +// 3. mc (multi-column selectivity): the value returned by estimating the +// predicate selectivity with a combination of single-column and multi-column +// distinct counts. It falls somewhere between upper and lower bound, and +// thus approximates the level of correlation of the columns. // // The "correlation" returned by this function is thus: -// corr = (mc - lb) / (ub - lb) +// +// corr = (mc - lb) / (ub - lb) +// // where -// lb <= mc <= ub +// +// lb <= mc <= ub // // This value will be used to refine the selectivity estimate from single-column // histograms, which do not contain information about column correlations. 
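To make the formulas above concrete, here is a small self-contained sketch that reproduces the soft-FD-strength estimate and the corr = (mc - lb) / (ub - lb) proxy with plain float64 values. It is only an illustration under simplified assumptions (no props.Selectivity type, no multiColWeight blending, no handling of zero or missing distinct counts), not the code being diffed here; the numbers in main mirror the worked example in the comment.

package main

import (
	"fmt"
	"math"
)

// estimateMultiColDistinct applies the soft-FD-strength formula from the
// comment above to two columns x and y, using plain float64 distinct counts.
func estimateMultiColDistinct(newX, newY, oldX, oldY, oldXY float64) float64 {
	minValue := math.Max(newX, newY)
	maxValue := newX * newY
	rng := maxValue - minValue

	fdStrength := math.Max(oldX, oldY) / oldXY
	minFDStrength := math.Max(oldX, oldY) / (oldX * oldY)
	fdStrengthScaled := 1.0
	if minFDStrength < 1 {
		fdStrengthScaled = (fdStrength - minFDStrength) / (1 - minFDStrength)
	}
	return minValue + rng*(1-fdStrengthScaled)
}

// correlationProxy computes corr = (mc - lb) / (ub - lb), where lb is the
// fully-independent selectivity estimate, ub the fully-correlated one, and mc
// the multi-column estimate assumed to lie between them.
func correlationProxy(lb, ub, mc float64) float64 {
	if ub <= lb {
		return 0 // single conjunct or degenerate bounds: no correlation signal
	}
	return (mc - lb) / (ub - lb)
}

func main() {
	// Worked example from the comment: old_distinct(x)=100, old_distinct(y)=10,
	// new_distinct(x)=2, new_distinct(y)=2.
	fmt.Println(estimateMultiColDistinct(2, 2, 100, 10, 100))  // 2 (perfectly correlated)
	fmt.Println(estimateMultiColDistinct(2, 2, 100, 10, 1000)) // 4 (fully independent)

	// lb = 0.1*0.2 = 0.02, ub = 0.1, and a multi-column estimate mc = 0.05.
	fmt.Printf("%.3f\n", correlationProxy(0.02, 0.1, 0.05)) // 0.375
}

Running the sketch prints 2, 4, and 0.375: the perfectly-correlated case, the independent case, and a multi-column estimate sitting partway between the lower and upper bounds.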
@@ -4123,14 +4130,13 @@ func (sb *statisticsBuilder) selectivityFromNullsRemoved( // predicateSelectivity calculates the selectivity of a predicate, using the // following formula: // -// sel = (output row count) / (input row count) +// sel = (output row count) / (input row count) // // where // -// output row count = -// (fraction of non-null values preserved) * (number of non-null input rows) + -// (fraction of null values preserved) * (number of null input rows) -// +// output row count = +// (fraction of non-null values preserved) * (number of non-null input rows) + +// (fraction of null values preserved) * (number of null input rows) func (sb *statisticsBuilder) predicateSelectivity( nonNullSelectivity, nullSelectivity props.Selectivity, inputNullCount, inputRowCount float64, ) props.Selectivity { @@ -4249,11 +4255,14 @@ func (sb *statisticsBuilder) selectivityFromOredEquivalencies( // selectivity of an individual ORed predicate. // // Given predicates A and B and probability function P: -// P(A or B) = P(A) + P(B) - P(A and B) +// +// P(A or B) = P(A) + P(B) - P(A and B) +// // Continuation: -// P(A or B or C) = P(A or B) + P(C) - P((A or B) and C) -// P(A or B or C or D) = P(A or B or C) + P(D) - P((A or B or C) and D) -// ... +// +// P(A or B or C) = P(A or B) + P(C) - P((A or B) and C) +// P(A or B or C or D) = P(A or B or C) + P(D) - P((A or B or C) and D) +// ... // // This seems to be a standard approach in the database world. // The textbook solution is more complex: @@ -4261,10 +4270,13 @@ func (sb *statisticsBuilder) selectivityFromOredEquivalencies( // https://stats.stackexchange.com/questions/87533/whats-the-general-disjunction-rule-for-n-events // // Q: Does the iterative approach do a good enough job of approximating -// the non-iterative textbook solution? +// +// the non-iterative textbook solution? // // In the formula we assume A and B are independent, so: -// P(A and B) = P(A) * P(B) +// +// P(A and B) = P(A) * P(B) +// // The independence assumption may not be correct in all cases. // Would using the full formula lead to more errors since the independence // assumption is used in more terms in that formula? @@ -4432,7 +4444,7 @@ func (sb *statisticsBuilder) selectivityFromUnappliedConjuncts( // and the determinant columns each have distinctCount = 1, we should consider // the implied correlations for selectivity calculation. Consider the query: // -// SELECT * FROM customer WHERE id = 123 and name = 'John Smith' +// SELECT * FROM customer WHERE id = 123 and name = 'John Smith' // // If id is the primary key of customer, then name is functionally determined // by id. We only need to consider the selectivity of id, not name, since id @@ -4441,7 +4453,6 @@ func (sb *statisticsBuilder) selectivityFromUnappliedConjuncts( // eliminating columns that can be functionally determined by other columns. // If the distinct count on all of these reduced columns is one, then we return // this reduced column set to be used for selectivity calculation. -// func (sb *statisticsBuilder) tryReduceCols( cols opt.ColSet, s *props.Statistics, fd *props.FuncDepSet, ) opt.ColSet { diff --git a/pkg/sql/opt/memo/typing.go b/pkg/sql/opt/memo/typing.go index 2d9c6e300aca..632b1312b367 100644 --- a/pkg/sql/opt/memo/typing.go +++ b/pkg/sql/opt/memo/typing.go @@ -62,11 +62,13 @@ func InferBinaryType(op opt.Operator, leftType, rightType *types.T) *types.T { // InferWhensType returns the type of a CASE expression, which is // of the form: -// CASE [ ] -// WHEN THEN -// [ WHEN THEN ] ... 
-// [ ELSE ] -// END +// +// CASE [ ] +// WHEN THEN +// [ WHEN THEN ] ... +// [ ELSE ] +// END +// // All possible values should have the same type, and that is the type of the // case. func InferWhensType(whens ScalarListExpr, orElse opt.ScalarExpr) *types.T { @@ -348,11 +350,13 @@ func typeCoalesce(e opt.ScalarExpr) *types.T { // typeCase returns the type of a CASE expression, which is // of the form: -// CASE [ ] -// WHEN THEN -// [ WHEN THEN ] ... -// [ ELSE ] -// END +// +// CASE [ ] +// WHEN THEN +// [ WHEN THEN ] ... +// [ ELSE ] +// END +// // The type is equal to the type of the WHEN THEN clauses, or // the type of the ELSE value if all the previous types are unknown. func typeCase(e opt.ScalarExpr) *types.T { diff --git a/pkg/sql/opt/memo/typing_test.go b/pkg/sql/opt/memo/typing_test.go index 1ca87ddf0f65..6e26bbc52014 100644 --- a/pkg/sql/opt/memo/typing_test.go +++ b/pkg/sql/opt/memo/typing_test.go @@ -66,8 +66,8 @@ func TestBinaryAllowsNullArgs(t *testing.T) { // TestTypingUnaryAssumptions ensures that unary overloads conform to certain // assumptions we're making in the type inference code: -// 1. The return type can be inferred from the operator type and the data -// types of its operand. +// 1. The return type can be inferred from the operator type and the data +// types of its operand. func TestTypingUnaryAssumptions(t *testing.T) { for name, overloads := range tree.UnaryOps { for i, overload := range overloads { @@ -92,10 +92,10 @@ func TestTypingUnaryAssumptions(t *testing.T) { // TestTypingComparisonAssumptions ensures that comparison overloads conform to // certain assumptions we're making in the type inference code: -// 1. All comparison ops will be present in tree.CmpOps after being mapped -// with NormalizeComparison. -// 2. The overload can be inferred from the operator type and the data -// types of its operands. +// 1. All comparison ops will be present in tree.CmpOps after being mapped +// with NormalizeComparison. +// 2. The overload can be inferred from the operator type and the data +// types of its operands. func TestTypingComparisonAssumptions(t *testing.T) { for _, op := range opt.ComparisonOperators { newOp, _, _ := memo.NormalizeComparison(op) @@ -127,10 +127,10 @@ func TestTypingComparisonAssumptions(t *testing.T) { // TestTypingAggregateAssumptions ensures that aggregate overloads conform to // certain assumptions we're making in the type inference code: -// 1. The return type can be inferred from the operator type and the data -// types of its operand. -// 2. The return type of overloads is fixed. -// 3. The return type for min/max aggregates is same as type of argument. +// 1. The return type can be inferred from the operator type and the data +// types of its operand. +// 2. The return type of overloads is fixed. +// 3. The return type for min/max aggregates is same as type of argument. func TestTypingAggregateAssumptions(t *testing.T) { for _, name := range builtins.AllAggregateBuiltinNames { if name == builtins.AnyNotNull || diff --git a/pkg/sql/opt/metadata.go b/pkg/sql/opt/metadata.go index 8329cada0382..ede02e9d46e3 100644 --- a/pkg/sql/opt/metadata.go +++ b/pkg/sql/opt/metadata.go @@ -49,21 +49,21 @@ type privilegeBitmap uint32 // // For example, consider the query: // -// SELECT x FROM a WHERE y > 0 +// SELECT x FROM a WHERE y > 0 // // There are 2 columns in the above query: x and y. 
During name resolution, the // above query becomes: // -// SELECT [0] FROM a WHERE [1] > 0 -// -- [0] -> x -// -- [1] -> y +// SELECT [0] FROM a WHERE [1] > 0 +// -- [0] -> x +// -- [1] -> y // // An operator is allowed to reuse some or all of the column ids of an input if: // -// 1. For every output row, there exists at least one input row having identical -// values for those columns. -// 2. OR if no such input row exists, there is at least one output row having -// NULL values for all those columns (e.g. when outer join NULL-extends). +// 1. For every output row, there exists at least one input row having identical +// values for those columns. +// 2. OR if no such input row exists, there is at least one output row having +// NULL values for all those columns (e.g. when outer join NULL-extends). // // For example, is it safe for a Select to use its input's column ids because it // only filters rows. Likewise, pass-through column ids of a Project can be @@ -71,7 +71,7 @@ type privilegeBitmap uint32 // // For an example where columns cannot be reused, consider the query: // -// SELECT * FROM a AS l JOIN a AS r ON (l.x = r.y) +// SELECT * FROM a AS l JOIN a AS r ON (l.x = r.y) // // In this query, `l.x` is not equivalent to `r.x` and `l.y` is not equivalent // to `r.y`. Therefore, we need to give these columns different ids. @@ -562,14 +562,13 @@ func (md *Metadata) ColumnMeta(colID ColumnID) *ColumnMeta { // QualifiedAlias returns the column alias, possibly qualified with the table, // schema, or database name: // -// 1. If fullyQualify is true, then the returned alias is prefixed by the -// original, fully qualified name of the table: tab.Name().FQString(). -// -// 2. If there's another column in the metadata with the same column alias but -// a different table name, then prefix the column alias with the table -// name: "tabName.columnAlias". If alwaysQualify is true, then the column -// alias is always prefixed with the table alias. +// 1. If fullyQualify is true, then the returned alias is prefixed by the +// original, fully qualified name of the table: tab.Name().FQString(). // +// 2. If there's another column in the metadata with the same column alias but +// a different table name, then prefix the column alias with the table +// name: "tabName.columnAlias". If alwaysQualify is true, then the column +// alias is always prefixed with the table alias. func (md *Metadata) QualifiedAlias( colID ColumnID, fullyQualify, alwaysQualify bool, catalog cat.Catalog, ) string { diff --git a/pkg/sql/opt/norm/bool_funcs.go b/pkg/sql/opt/norm/bool_funcs.go index 5c01760c55ff..478b822565db 100644 --- a/pkg/sql/opt/norm/bool_funcs.go +++ b/pkg/sql/opt/norm/bool_funcs.go @@ -27,9 +27,12 @@ func (c *CustomFuncs) ConcatLeftDeepAnds(left, right opt.ScalarExpr) opt.ScalarE } // NegateComparison negates a comparison op like: -// a.x = 5 +// +// a.x = 5 +// // to: -// a.x <> 5 +// +// a.x <> 5 func (c *CustomFuncs) NegateComparison( cmp opt.Operator, left, right opt.ScalarExpr, ) opt.ScalarExpr { @@ -48,11 +51,11 @@ func (c *CustomFuncs) CanNegateComparison(cmp opt.Operator) bool { // whether it appears as a conjunct in the right expression. If so, it returns // the matching conjunct. Otherwise, it returns ok=false. 
For example: // -// A OR A => A -// B OR A => nil -// A OR (A AND B) => A -// (A AND B) OR (A AND C) => A -// (A AND B AND C) OR (A AND (D OR E)) => A +// A OR A => A +// B OR A => nil +// A OR (A AND B) => A +// (A AND B) OR (A AND C) => A +// (A AND B AND C) OR (A AND (D OR E)) => A // // Once a redundant conjunct has been found, it is extracted via a call to the // ExtractRedundantConjunct function. Redundant conjuncts are extracted from @@ -98,13 +101,13 @@ func (c *CustomFuncs) isConjunct(candidate, conjunction opt.ScalarExpr) bool { // and returns an And of the conjunct with the remaining Or expression (a // logically equivalent expression). For example: // -// (A AND B) OR (A AND C) => A AND (B OR C) +// (A AND B) OR (A AND C) => A AND (B OR C) // // If extracting the conjunct from one of the OR conditions would result in an // empty condition, the conjunct itself is returned (a logically equivalent // expression). For example: // -// A OR (A AND B) => A +// A OR (A AND B) => A // // These transformations are useful for finding a conjunct that can be pushed // down in the query tree. For example, if the redundant conjunct A is fully diff --git a/pkg/sql/opt/norm/comp_funcs.go b/pkg/sql/opt/norm/comp_funcs.go index 3c2bdcec2982..f42da099b45f 100644 --- a/pkg/sql/opt/norm/comp_funcs.go +++ b/pkg/sql/opt/norm/comp_funcs.go @@ -21,9 +21,12 @@ import ( // CommuteInequality swaps the operands of an inequality comparison expression, // changing the operator to compensate: -// 5 < x +// +// 5 < x +// // to: -// x > 5 +// +// x > 5 func (c *CustomFuncs) CommuteInequality( op opt.Operator, left, right opt.ScalarExpr, ) opt.ScalarExpr { @@ -33,9 +36,12 @@ func (c *CustomFuncs) CommuteInequality( // NormalizeTupleEquality remaps the elements of two tuples compared for // equality, like this: -// (a, b, c) = (x, y, z) +// +// (a, b, c) = (x, y, z) +// // into this: -// (a = x) AND (b = y) AND (c = z) +// +// (a = x) AND (b = y) AND (c = z) func (c *CustomFuncs) NormalizeTupleEquality(left, right memo.ScalarListExpr) opt.ScalarExpr { if len(left) != len(right) { panic(errors.AssertionFailedf("tuple length mismatch")) diff --git a/pkg/sql/opt/norm/decorrelate_funcs.go b/pkg/sql/opt/norm/decorrelate_funcs.go index 698fb69c423b..b9d15f2a45a6 100644 --- a/pkg/sql/opt/norm/decorrelate_funcs.go +++ b/pkg/sql/opt/norm/decorrelate_funcs.go @@ -122,14 +122,13 @@ func (c *CustomFuncs) deriveHasHoistableSubquery(scalar opt.ScalarExpr) bool { // subqueries. Any found queries are hoisted into LeftJoinApply or // InnerJoinApply operators, depending on subquery cardinality: // -// SELECT * FROM xy WHERE (SELECT u FROM uv WHERE u=x LIMIT 1) IS NULL -// => -// SELECT xy.* -// FROM xy -// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) -// ON True -// WHERE u IS NULL -// +// SELECT * FROM xy WHERE (SELECT u FROM uv WHERE u=x LIMIT 1) IS NULL +// => +// SELECT xy.* +// FROM xy +// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) +// ON True +// WHERE u IS NULL func (c *CustomFuncs) HoistSelectSubquery( input memo.RelExpr, filters memo.FiltersExpr, ) memo.RelExpr { @@ -157,13 +156,12 @@ func (c *CustomFuncs) HoistSelectSubquery( // correlated subqueries. 
Any found queries are hoisted into LeftJoinApply // or InnerJoinApply operators, depending on subquery cardinality: // -// SELECT (SELECT max(u) FROM uv WHERE u=x) AS max FROM xy -// => -// SELECT max -// FROM xy -// INNER JOIN LATERAL (SELECT max(u) FROM uv WHERE u=x) -// ON True -// +// SELECT (SELECT max(u) FROM uv WHERE u=x) AS max FROM xy +// => +// SELECT max +// FROM xy +// INNER JOIN LATERAL (SELECT max(u) FROM uv WHERE u=x) +// ON True func (c *CustomFuncs) HoistProjectSubquery( input memo.RelExpr, projections memo.ProjectionsExpr, passthrough opt.ColSet, ) memo.RelExpr { @@ -188,22 +186,21 @@ func (c *CustomFuncs) HoistProjectSubquery( // subqueries. Any found queries are hoisted into LeftJoinApply or // InnerJoinApply operators, depending on subquery cardinality: // -// SELECT y, z -// FROM xy -// FULL JOIN yz -// ON (SELECT u FROM uv WHERE u=x LIMIT 1) IS NULL -// => -// SELECT y, z -// FROM xy -// FULL JOIN LATERAL -// ( -// SELECT * -// FROM yz -// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) -// ON True -// ) -// ON u IS NULL -// +// SELECT y, z +// FROM xy +// FULL JOIN yz +// ON (SELECT u FROM uv WHERE u=x LIMIT 1) IS NULL +// => +// SELECT y, z +// FROM xy +// FULL JOIN LATERAL +// ( +// SELECT * +// FROM yz +// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) +// ON True +// ) +// ON u IS NULL func (c *CustomFuncs) HoistJoinSubquery( op opt.Operator, left, right memo.RelExpr, on memo.FiltersExpr, private *memo.JoinPrivate, ) memo.RelExpr { @@ -232,18 +229,18 @@ func (c *CustomFuncs) HoistJoinSubquery( // subqueries. Any found queries are hoisted into LeftJoinApply or // InnerJoinApply operators, depending on subquery cardinality: // -// SELECT (VALUES (SELECT u FROM uv WHERE u=x LIMIT 1)) FROM xy -// => -// SELECT -// ( -// SELECT vals.* -// FROM (VALUES ()) -// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) -// ON True -// INNER JOIN LATERAL (VALUES (u)) vals -// ON True -// ) -// FROM xy +// SELECT (VALUES (SELECT u FROM uv WHERE u=x LIMIT 1)) FROM xy +// => +// SELECT +// ( +// SELECT vals.* +// FROM (VALUES ()) +// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) +// ON True +// INNER JOIN LATERAL (VALUES (u)) vals +// ON True +// ) +// FROM xy // // The dummy VALUES clause with a singleton empty row is added to the tree in // order to use the hoister, which requires an initial input query. While a @@ -273,25 +270,24 @@ func (c *CustomFuncs) HoistValuesSubquery( // correlated subqueries. 
Any found queries are hoisted into LeftJoinApply or // InnerJoinApply operators, depending on subquery cardinality: // -// SELECT generate_series -// FROM xy -// INNER JOIN LATERAL ROWS FROM -// ( -// generate_series(1, (SELECT v FROM uv WHERE u=x)) -// ) -// => -// SELECT generate_series -// FROM xy -// ROWS FROM -// ( -// SELECT generate_series -// FROM (VALUES ()) -// LEFT JOIN LATERAL (SELECT v FROM uv WHERE u=x) -// ON True -// INNER JOIN LATERAL ROWS FROM (generate_series(1, v)) -// ON True -// ) -// +// SELECT generate_series +// FROM xy +// INNER JOIN LATERAL ROWS FROM +// ( +// generate_series(1, (SELECT v FROM uv WHERE u=x)) +// ) +// => +// SELECT generate_series +// FROM xy +// ROWS FROM +// ( +// SELECT generate_series +// FROM (VALUES ()) +// LEFT JOIN LATERAL (SELECT v FROM uv WHERE u=x) +// ON True +// INNER JOIN LATERAL ROWS FROM (generate_series(1, v)) +// ON True +// ) func (c *CustomFuncs) HoistProjectSetSubquery(input memo.RelExpr, zip memo.ZipExpr) memo.RelExpr { newZip := make(memo.ZipExpr, 0, len(zip)) @@ -435,11 +431,10 @@ func (c *CustomFuncs) NonKeyCols(in memo.RelExpr) opt.ColSet { // sets of aggregate functions to be added to the resulting Aggregations // operator, with one set appended to the other, like this: // -// (Aggregations -// [(ConstAgg (Variable 1)) (ConstAgg (Variable 2)) (FirstAgg (Variable 3))] -// [1,2,3] -// ) -// +// (Aggregations +// [(ConstAgg (Variable 1)) (ConstAgg (Variable 2)) (FirstAgg (Variable 3))] +// [1,2,3] +// ) func (c *CustomFuncs) MakeAggCols2( aggOp opt.Operator, cols opt.ColSet, aggOp2 opt.Operator, cols2 opt.ColSet, ) memo.AggregationsExpr { @@ -526,9 +521,9 @@ func (c *CustomFuncs) CanaryColSet(canaryCol opt.ColumnID) opt.ColSet { // AggsCanBeDecorrelated returns true if every aggregate satisfies one of the // following conditions: // -// * It is CountRows (because it will be translated into Count), -// * It ignores nulls (because nothing extra must be done for it) -// * It gives NULL on no input (because this is how we translate non-null +// - It is CountRows (because it will be translated into Count), +// - It ignores nulls (because nothing extra must be done for it) +// - It gives NULL on no input (because this is how we translate non-null // ignoring aggregates) // // TODO(justin): we can lift the third condition if we have a function that @@ -769,22 +764,22 @@ func (r *subqueryHoister) input() memo.RelExpr { // JoinApply operator to ensure that it has no effect on the cardinality of its // input. For example: // -// SELECT * -// FROM xy -// WHERE -// (SELECT u FROM uv WHERE u=x LIMIT 1) IS NOT NULL -// OR EXISTS(SELECT * FROM jk WHERE j=x) -// => -// SELECT xy.* -// FROM xy -// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) -// ON True -// INNER JOIN LATERAL -// ( -// SELECT (CONST_AGG(True) IS NOT NULL) AS exists FROM jk WHERE j=x -// ) -// ON True -// WHERE u IS NOT NULL OR exists +// SELECT * +// FROM xy +// WHERE +// (SELECT u FROM uv WHERE u=x LIMIT 1) IS NOT NULL +// OR EXISTS(SELECT * FROM jk WHERE j=x) +// => +// SELECT xy.* +// FROM xy +// LEFT JOIN LATERAL (SELECT u FROM uv WHERE u=x LIMIT 1) +// ON True +// INNER JOIN LATERAL +// ( +// SELECT (CONST_AGG(True) IS NOT NULL) AS exists FROM jk WHERE j=x +// ) +// ON True +// WHERE u IS NOT NULL OR exists // // The choice of whether to use LeftJoinApply or InnerJoinApply depends on the // cardinality of the hoisted subquery. 
If zero rows can be returned from the @@ -859,18 +854,18 @@ func (r *subqueryHoister) hoistAll(scalar opt.ScalarExpr) opt.ScalarExpr { // constructGroupByExists transforms a scalar Exists expression like this: // -// EXISTS(SELECT * FROM a WHERE a.x=b.x) +// EXISTS(SELECT * FROM a WHERE a.x=b.x) // // into a scalar GroupBy expression that returns a one row, one column relation: // -// SELECT (CONST_AGG(True) IS NOT NULL) AS exists -// FROM (SELECT * FROM a WHERE a.x=b.x) +// SELECT (CONST_AGG(True) IS NOT NULL) AS exists +// FROM (SELECT * FROM a WHERE a.x=b.x) // // The expression uses an internally-defined CONST_AGG aggregation function, // since it's able to short-circuit on the first non-null it encounters. The // above expression is equivalent to: // -// SELECT COUNT(True) > 0 FROM (SELECT * FROM a WHERE a.x=b.x) +// SELECT COUNT(True) > 0 FROM (SELECT * FROM a WHERE a.x=b.x) // // CONST_AGG (and COUNT) always return exactly one boolean value in the context // of a scalar GroupBy expression. Because its operand is always True, the only @@ -912,23 +907,23 @@ func (r *subqueryHoister) constructGroupByExists(subquery memo.RelExpr) memo.Rel // constructGroupByAny transforms a scalar Any expression like this: // -// z = ANY(SELECT x FROM xy) +// z = ANY(SELECT x FROM xy) // // into a scalar GroupBy expression that returns a one row, one column relation // that is equivalent to this: // -// SELECT -// CASE -// WHEN bool_or(notnull) AND z IS NOT Null THEN True -// ELSE bool_or(notnull) IS NULL THEN False -// ELSE Null -// END -// FROM -// ( -// SELECT x IS NOT Null AS notnull -// FROM xy -// WHERE (z=x) IS NOT False -// ) +// SELECT +// CASE +// WHEN bool_or(notnull) AND z IS NOT Null THEN True +// ELSE bool_or(notnull) IS NULL THEN False +// ELSE Null +// END +// FROM +// ( +// SELECT x IS NOT Null AS notnull +// FROM xy +// WHERE (z=x) IS NOT False +// ) // // BOOL_OR returns true if any input is true, else false if any input is false, // else null. This is a mismatch with ANY, which returns true if any input is @@ -938,46 +933,46 @@ func (r *subqueryHoister) constructGroupByExists(subquery memo.RelExpr) memo.Rel // are difficult to hoist above left joins). The following procedure solves the // mismatch between BOOL_OR and ANY, as well as avoids correlated projections: // -// 1. Filter out false comparison rows with an initial filter. The result of -// ANY does not change, no matter how many false rows are added or removed. -// This step has the effect of mapping a set containing only false -// comparison rows to the empty set (which is desirable). +// 1. Filter out false comparison rows with an initial filter. The result of +// ANY does not change, no matter how many false rows are added or removed. +// This step has the effect of mapping a set containing only false +// comparison rows to the empty set (which is desirable). // -// 2. Step #1 leaves only true and null comparison rows. A null comparison row -// occurs when either the left or right comparison operand is null (Any -// only allows comparison operators that propagate nulls). Map each null -// row to a false row, but only in the case where the right operand is null -// (i.e. the operand that came from the subquery). The case where the left -// operand is null will be handled later. +// 2. Step #1 leaves only true and null comparison rows. A null comparison row +// occurs when either the left or right comparison operand is null (Any +// only allows comparison operators that propagate nulls). 
Map each null +// row to a false row, but only in the case where the right operand is null +// (i.e. the operand that came from the subquery). The case where the left +// operand is null will be handled later. // -// 3. Use the BOOL_OR aggregation function on the true/false values from step -// #2. If there is at least one true value, then BOOL_OR returns true. If -// there are no values (the empty set case), then BOOL_OR returns null. -// Because of the previous steps, this indicates that the original set -// contained only false values (or no values at all). +// 3. Use the BOOL_OR aggregation function on the true/false values from step +// #2. If there is at least one true value, then BOOL_OR returns true. If +// there are no values (the empty set case), then BOOL_OR returns null. +// Because of the previous steps, this indicates that the original set +// contained only false values (or no values at all). // -// 4. A True result from BOOL_OR is ambiguous. It could mean that the -// comparison returned true for one of the rows in the group. Or, it could -// mean that the left operand was null. The CASE statement ensures that -// True is only returned if the left operand was not null. +// 4. A True result from BOOL_OR is ambiguous. It could mean that the +// comparison returned true for one of the rows in the group. Or, it could +// mean that the left operand was null. The CASE statement ensures that +// True is only returned if the left operand was not null. // -// 5. In addition, the CASE statement maps a null return value to false, and -// false to null. This matches ANY behavior. +// 5. In addition, the CASE statement maps a null return value to false, and +// false to null. This matches ANY behavior. // // The following is a table showing the various interesting cases: // -// | subquery | before | after | after -// z | x values | BOOL_OR | BOOL_OR | CASE -// ------+-----------+---------------+---------+------- -// 1 | (1) | (true) | true | true -// 1 | (1, null) | (true, false) | true | true -// 1 | (1, 2) | (true) | true | true -// 1 | (null) | (false) | false | null -// null | (1) | (true) | true | null -// null | (1, null) | (true, false) | true | null -// null | (null) | (false) | false | null -// 2 | (1) | (empty) | null | false -// *any* | (empty) | (empty) | null | false +// | subquery | before | after | after +// z | x values | BOOL_OR | BOOL_OR | CASE +// ------+-----------+---------------+---------+------- +// 1 | (1) | (true) | true | true +// 1 | (1, null) | (true, false) | true | true +// 1 | (1, 2) | (true) | true | true +// 1 | (null) | (false) | false | null +// null | (1) | (true) | true | null +// null | (1, null) | (true, false) | true | null +// null | (null) | (false) | false | null +// 2 | (1) | (empty) | null | false +// *any* | (empty) | (empty) | null | false // // It is important that the set given to BOOL_OR does not contain any null // values (the reason for step #2). 
Null is reserved for use by the diff --git a/pkg/sql/opt/norm/factory.go b/pkg/sql/opt/norm/factory.go index dfd4487bc221..e2ff8d4eb5f6 100644 --- a/pkg/sql/opt/norm/factory.go +++ b/pkg/sql/opt/norm/factory.go @@ -246,17 +246,17 @@ func (f *Factory) EvalContext() *eval.Context { // // Sample usage: // -// var replaceFn ReplaceFunc -// replaceFn = func(e opt.Expr) opt.Expr { -// if e.Op() == opt.PlaceholderOp { -// return f.ConstructConst(evalPlaceholder(e)) -// } +// var replaceFn ReplaceFunc +// replaceFn = func(e opt.Expr) opt.Expr { +// if e.Op() == opt.PlaceholderOp { +// return f.ConstructConst(evalPlaceholder(e)) +// } // -// // Copy e, calling replaceFn on its inputs recursively. -// return f.CopyAndReplaceDefault(e, replaceFn) -// } +// // Copy e, calling replaceFn on its inputs recursively. +// return f.CopyAndReplaceDefault(e, replaceFn) +// } // -// f.CopyAndReplace(from, fromProps, replaceFn) +// f.CopyAndReplace(from, fromProps, replaceFn) // // NOTE: Callers must take care to always create brand new copies of non- // singleton source nodes rather than referencing existing nodes. The source diff --git a/pkg/sql/opt/norm/fold_constants_funcs.go b/pkg/sql/opt/norm/fold_constants_funcs.go index dc79c74e7715..4fac21b82cfc 100644 --- a/pkg/sql/opt/norm/fold_constants_funcs.go +++ b/pkg/sql/opt/norm/fold_constants_funcs.go @@ -41,7 +41,7 @@ import ( // // Examples illustrating the various cases: // -// 1) Prepare and execute query with placeholders +// 1. Prepare and execute query with placeholders // // SELECT * FROM t WHERE time > now() - $1 // @@ -51,7 +51,7 @@ import ( // folded, along with the subtraction. If we have an index on time, we will // use it. // -// 2) Prepare and execute query without placeholders +// 2. Prepare and execute query without placeholders // // SELECT * FROM t WHERE time > now() - '1 minute'::INTERVAL // @@ -63,7 +63,7 @@ import ( // placeholders here, but AssignPlaceholders will nevertheless recreate the // expression, allowing folding to happen. // -// 3) Execute query without placeholders +// 3. Execute query without placeholders // // SELECT * FROM t WHERE time > now() - '1 minute'::INTERVAL // @@ -73,7 +73,6 @@ import ( // the plan cache. In the future, we may want to detect queries that are // re-executed frequently and cache a non-folded version like in the prepare // case. -// type FoldingControl struct { // allowStable controls whether canFoldOperator returns true or false for // volatility.Stable. @@ -406,20 +405,20 @@ func (c *CustomFuncs) FoldAssignmentCast( // TO is monotonic. // That is, if a and b are values of type FROM, then // -// 1. a = b implies a::TO = b::TO and -// 2. a < b implies a::TO <= b::TO +// 1. a = b implies a::TO = b::TO and +// 2. a < b implies a::TO <= b::TO // // Property (1) can be violated by cases like: // -// '-0'::FLOAT = '0'::FLOAT, but '-0'::FLOAT::STRING != '0'::FLOAT::STRING +// '-0'::FLOAT = '0'::FLOAT, but '-0'::FLOAT::STRING != '0'::FLOAT::STRING // // Property (2) can be violated by cases like: // -// 2 < 10, but 2::STRING > 10::STRING. +// 2 < 10, but 2::STRING > 10::STRING. // // Note that the stronger version of (2), // -// a < b implies a::TO < b::TO +// a < b implies a::TO < b::TO // // is not required, for instance this is not generally true of conversion from // a TIMESTAMP to a DATE, but certain such conversions can still generate spans @@ -557,9 +556,9 @@ func (c *CustomFuncs) FoldColumnAccess( // to Null when any of its arguments are Null. 
A function can be folded to Null // in this case if all of the following are true: // -// 1. It is not evaluated when any of its arguments are null -// (CalledOnNullInput=false). -// 2. It is a normal function, not an aggregate, window, or generator. +// 1. It is not evaluated when any of its arguments are null +// (CalledOnNullInput=false). +// 2. It is a normal function, not an aggregate, window, or generator. // // See FoldFunctionWithNullArg for more details. func (c *CustomFuncs) CanFoldFunctionWithNullArg(private *memo.FunctionPrivate) bool { diff --git a/pkg/sql/opt/norm/general_funcs.go b/pkg/sql/opt/norm/general_funcs.go index ceecadfe56e8..4ed4e4bcf22c 100644 --- a/pkg/sql/opt/norm/general_funcs.go +++ b/pkg/sql/opt/norm/general_funcs.go @@ -304,7 +304,7 @@ func (c *CustomFuncs) OuterCols(e opt.Expr) opt.ColSet { // column, or in other words, a reference to a variable that is not bound within // its own scope. For example: // -// SELECT * FROM a WHERE EXISTS(SELECT * FROM b WHERE b.x = a.x) +// SELECT * FROM a WHERE EXISTS(SELECT * FROM b WHERE b.x = a.x) // // The a.x variable in the EXISTS subquery references a column outside the scope // of the subquery. It is an "outer column" for the subquery (see the comment on @@ -315,11 +315,12 @@ func (c *CustomFuncs) HasOuterCols(input opt.Expr) bool { // IsCorrelated returns true if any variable in the source expression references // a column from the given set of output columns. For example: -// (InnerJoin -// (Scan a) -// (Scan b) -// [ ... (FiltersItem $item:(Eq (Variable a.x) (Const 1))) ... ] -// ) +// +// (InnerJoin +// (Scan a) +// (Scan b) +// [ ... (FiltersItem $item:(Eq (Variable a.x) (Const 1))) ... ] +// ) // // The $item expression is correlated with the (Scan a) expression because it // references one of its columns. But the $item expression is not correlated @@ -331,11 +332,11 @@ func (c *CustomFuncs) IsCorrelated(src memo.RelExpr, cols opt.ColSet) bool { // IsBoundBy returns true if all outer references in the source expression are // bound by the given columns. For example: // -// (InnerJoin -// (Scan a) -// (Scan b) -// [ ... $item:(FiltersItem (Eq (Variable a.x) (Const 1))) ... ] -// ) +// (InnerJoin +// (Scan a) +// (Scan b) +// [ ... $item:(FiltersItem (Eq (Variable a.x) (Const 1))) ... ] +// ) // // The $item expression is fully bound by the output columns of the (Scan a) // expression because all of its outer references are satisfied by the columns @@ -388,11 +389,11 @@ func (c *CustomFuncs) FilterOuterCols(filters memo.FiltersExpr) opt.ColSet { // FiltersBoundBy returns true if all outer references in any of the filter // conditions are bound by the given columns. For example: // -// (InnerJoin -// (Scan a) -// (Scan b) -// $filters:[ (FiltersItem (Eq (Variable a.x) (Const 1))) ] -// ) +// (InnerJoin +// (Scan a) +// (Scan b) +// $filters:[ (FiltersItem (Eq (Variable a.x) (Const 1))) ] +// ) // // The $filters expression is fully bound by the output columns of the (Scan a) // expression because all of its outer references are satisfied by the columns @@ -689,14 +690,14 @@ func (c *CustomFuncs) ReplaceFiltersItem( // from the given list that are fully bound by the given columns (i.e. all // outer references are to one of these columns). 
For example: // -// (InnerJoin -// (Scan a) -// (Scan b) -// (Filters [ -// (Eq (Variable a.x) (Variable b.x)) -// (Gt (Variable a.x) (Const 1)) -// ]) -// ) +// (InnerJoin +// (Scan a) +// (Scan b) +// (Filters [ +// (Eq (Variable a.x) (Variable b.x)) +// (Gt (Variable a.x) (Const 1)) +// ]) +// ) // // Calling ExtractBoundConditions with the filter conditions list and the output // columns of (Scan a) would extract the (Gt) expression, since its outer @@ -883,11 +884,10 @@ func (c *CustomFuncs) AppendAggCols( // function of the given operator type for each of column in the given set. For // example, for ConstAggOp and columns (1,2), this expression is returned: // -// (Aggregations -// [(ConstAgg (Variable 1)) (ConstAgg (Variable 2))] -// [1,2] -// ) -// +// (Aggregations +// [(ConstAgg (Variable 1)) (ConstAgg (Variable 2))] +// [1,2] +// ) func (c *CustomFuncs) MakeAggCols(aggOp opt.Operator, cols opt.ColSet) memo.AggregationsExpr { colsLen := cols.Len() aggs := make(memo.AggregationsExpr, colsLen) @@ -963,8 +963,7 @@ func (c *CustomFuncs) IsPositiveInt(datum tree.Datum) bool { // For example, NormalizeCmpTimeZoneFunction uses this function implicitly to // match a specific function, like so: // -// (Function $args:* (FunctionPrivate "timezone")) -// +// (Function $args:* (FunctionPrivate "timezone")) func (c *CustomFuncs) EqualsString(left string, right string) bool { return left == right } diff --git a/pkg/sql/opt/norm/groupby_funcs.go b/pkg/sql/opt/norm/groupby_funcs.go index fcf22cc17ae7..d87b5f6838e5 100644 --- a/pkg/sql/opt/norm/groupby_funcs.go +++ b/pkg/sql/opt/norm/groupby_funcs.go @@ -36,12 +36,11 @@ func (c *CustomFuncs) RemoveGroupingCols( // aggregates are written into outElems and outColList. As an example, for // columns (1,2) and operator ConstAggOp, makeAggCols will set the following: // -// outElems[0] = (ConstAggOp (Variable 1)) -// outElems[1] = (ConstAggOp (Variable 2)) -// -// outColList[0] = 1 -// outColList[1] = 2 +// outElems[0] = (ConstAggOp (Variable 1)) +// outElems[1] = (ConstAggOp (Variable 2)) // +// outColList[0] = 1 +// outColList[1] = 2 func (c *CustomFuncs) makeAggCols( aggOp opt.Operator, cols opt.ColSet, outAggs memo.AggregationsExpr, ) { @@ -298,9 +297,9 @@ func (c *CustomFuncs) SingleRegressionCountArgument( // CanMergeAggs returns true if one of the following applies to each of the // given outer aggregation expressions: -// 1. The aggregation can be merged with a single inner aggregation. -// 2. The aggregation takes an inner grouping column as input and ignores -// duplicates. +// 1. The aggregation can be merged with a single inner aggregation. +// 2. The aggregation takes an inner grouping column as input and ignores +// duplicates. func (c *CustomFuncs) CanMergeAggs( innerAggs, outerAggs memo.AggregationsExpr, innerGroupingCols opt.ColSet, ) bool { diff --git a/pkg/sql/opt/norm/inline_funcs.go b/pkg/sql/opt/norm/inline_funcs.go index 42be5d747875..d9f7210211d7 100644 --- a/pkg/sql/opt/norm/inline_funcs.go +++ b/pkg/sql/opt/norm/inline_funcs.go @@ -84,7 +84,7 @@ func (c *CustomFuncs) inlineConstants( // one time, or if the projection expressions contain a correlated subquery. // For example: // -// SELECT x+1, x+2, y FROM a +// SELECT x+1, x+2, y FROM a // // HasDuplicateRefs would be true, since the x column is referenced twice. 
// @@ -189,9 +189,8 @@ func (c *CustomFuncs) VirtualColumns(scanPrivate *memo.ScanPrivate) opt.ColSet { // InlinableVirtualColumnFilters returns a new filters expression containing any // of the given filters that meet the criteria: // -// 1. The filter has references to any of the columns in virtualColumns. -// 2. The filter is not a correlated subquery. -// +// 1. The filter has references to any of the columns in virtualColumns. +// 2. The filter is not a correlated subquery. func (c *CustomFuncs) InlinableVirtualColumnFilters( filters memo.FiltersExpr, virtualColumns opt.ColSet, ) (inlinableFilters memo.FiltersExpr) { @@ -309,9 +308,12 @@ func (c *CustomFuncs) extractVarEqualsConst( // CanInlineConstVar returns true if there is an opportunity in the filters to // inline a variable restricted to be a constant, as in: -// SELECT * FROM foo WHERE a = 4 AND a IN (1, 2, 3, 4). +// +// SELECT * FROM foo WHERE a = 4 AND a IN (1, 2, 3, 4). +// // => -// SELECT * FROM foo WHERE a = 4 AND 4 IN (1, 2, 3, 4). +// +// SELECT * FROM foo WHERE a = 4 AND 4 IN (1, 2, 3, 4). func (c *CustomFuncs) CanInlineConstVar(f memo.FiltersExpr) bool { // usedIndices tracks the set of filter indices we've used to infer constant // values, so we don't inline into them. diff --git a/pkg/sql/opt/norm/join_funcs.go b/pkg/sql/opt/norm/join_funcs.go index 83bf2e6a609b..2e0ca82fb080 100644 --- a/pkg/sql/opt/norm/join_funcs.go +++ b/pkg/sql/opt/norm/join_funcs.go @@ -51,7 +51,7 @@ func (c *CustomFuncs) ConstructNonLeftJoin( // SimplifyNotNullEquality simplifies an expression of the following form: // -// (Is | IsNot (Eq) (True | False | Null)) +// (Is | IsNot (Eq) (True | False | Null)) // // in the case where the Eq expression is guaranteed to never result in null. // The testOp argument must be IsOp or IsNotOp, and the constOp argument must be @@ -186,15 +186,14 @@ func (c *CustomFuncs) MapJoinOpEqualities( // there is a single condition with one left column and one right column. // For example, consider this query: // -// SELECT * FROM a, b WHERE a.x = b.x AND a.x = a.y AND a.y = b.y +// SELECT * FROM a, b WHERE a.x = b.x AND a.x = a.y AND a.y = b.y // // It has an equivalence group {a.x, a.y, b.x, b.y}. The columns a.x and a.y // are on the left side, and b.x and b.y are on the right side. Initially there // are two conditions that cross both sides. After mapping, the query would be // converted to: // -// SELECT * FROM a, b WHERE a.x = a.y AND b.x = b.y AND a.x = b.x -// +// SELECT * FROM a, b WHERE a.x = a.y AND b.x = b.y AND a.x = b.x func (c *CustomFuncs) mapJoinOpEquivalenceGroup( filters memo.FiltersExpr, col opt.ColumnID, @@ -267,7 +266,7 @@ func (c *CustomFuncs) mapJoinOpEquivalenceGroup( // // For example, consider this query: // -// SELECT * FROM a INNER JOIN b ON a.x=b.x AND a.x + b.y = 5 +// SELECT * FROM a INNER JOIN b ON a.x=b.x AND a.x + b.y = 5 // // Since there is an equality predicate on a.x=b.x, it is possible to map // a.x + b.y = 5 to b.x + b.y = 5, and that allows the filter to be pushed down @@ -319,7 +318,7 @@ func (c *CustomFuncs) CanMapJoinOpFilter( // // For example, consider this query: // -// SELECT * FROM a INNER JOIN b ON a.x=b.x AND a.x + b.y = 5 +// SELECT * FROM a INNER JOIN b ON a.x=b.x AND a.x + b.y = 5 // // If MapJoinOpFilter is called with src as a.x + b.y = 5 and dst as (Scan b), // it returns b.x + b.y = 5. 
MapJoinOpFilter should not be called with the @@ -390,16 +389,16 @@ func (c *CustomFuncs) MapJoinOpFilter( // In general, replacing composite columns with "equivalent" (equal) columns // might change the result of an expression. For example, consider this query: // -// SELECT * FROM -// (VALUES (1.0)) AS t1(x), -// (VALUES (1.00)) AS t2(y) -// WHERE x=y AND x::text = '1.0'; +// SELECT * FROM +// (VALUES (1.0)) AS t1(x), +// (VALUES (1.00)) AS t2(y) +// WHERE x=y AND x::text = '1.0'; // // It should return the following result: // -// x | y -// -----+------ -// 1.0 | 1.00 +// x | y +// -----+------ +// 1.0 | 1.00 // // But if we use the equality predicate x=y to map x to y and infer an // additional filter y::text = '1.0', the query would return nothing. diff --git a/pkg/sql/opt/norm/norm_test.go b/pkg/sql/opt/norm/norm_test.go index 9456a87af104..622cdb6ea3d8 100644 --- a/pkg/sql/opt/norm/norm_test.go +++ b/pkg/sql/opt/norm/norm_test.go @@ -26,17 +26,19 @@ import ( // TestNormRules tests the various Optgen normalization rules found in the rules // directory. The tests are data-driven cases of the form: -// -// -// ---- -// +// +// +// +// ---- +// // // See OptTester.Handle for supported commands. // // Rules files can be run separately like this: -// make test PKG=./pkg/sql/opt/norm TESTS="TestNormRules/bool" -// make test PKG=./pkg/sql/opt/norm TESTS="TestNormRules/comp" -// ... +// +// make test PKG=./pkg/sql/opt/norm TESTS="TestNormRules/bool" +// make test PKG=./pkg/sql/opt/norm TESTS="TestNormRules/comp" +// ... func TestNormRules(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -55,8 +57,9 @@ func TestNormRules(t *testing.T) { } // TestRuleProps files can be run separately like this: -// make test PKG=./pkg/sql/opt/norm TESTS="TestNormRuleProps/orderings" -// ... +// +// make test PKG=./pkg/sql/opt/norm TESTS="TestNormRuleProps/orderings" +// ... func TestNormRuleProps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/opt/norm/project_builder.go b/pkg/sql/opt/norm/project_builder.go index 610ee6f156bc..4b4c5d5ff9d9 100644 --- a/pkg/sql/opt/norm/project_builder.go +++ b/pkg/sql/opt/norm/project_builder.go @@ -18,14 +18,13 @@ import ( // projectBuilder is a helper for constructing a ProjectOp that augments an // input with new synthesized and passthrough columns. Sample usage: // -// var pb projectBuilder -// pb.init(c, passthrough) -// e1 := pb.add(some expression) -// e2 := pb.add(some other expression) -// augmentedInput := pb.buildProject(input) -// // e1 and e2 are VariableOp expressions, with input columns -// // produced by augmentedInput. -// +// var pb projectBuilder +// pb.init(c, passthrough) +// e1 := pb.add(some expression) +// e2 := pb.add(some other expression) +// augmentedInput := pb.buildProject(input) +// // e1 and e2 are VariableOp expressions, with input columns +// // produced by augmentedInput. type projectBuilder struct { c *CustomFuncs projections memo.ProjectionsExpr diff --git a/pkg/sql/opt/norm/project_funcs.go b/pkg/sql/opt/norm/project_funcs.go index 8df893528dbd..fc2747d549d2 100644 --- a/pkg/sql/opt/norm/project_funcs.go +++ b/pkg/sql/opt/norm/project_funcs.go @@ -64,10 +64,9 @@ func (c *CustomFuncs) MergeProjections( // in the final Values operator, and Project synthesized columns are added to // it. Any unreferenced Values columns are discarded. 
For example: // -// SELECT column1, 3 FROM (VALUES (1, 2)) -// => -// (VALUES (1, 3)) -// +// SELECT column1, 3 FROM (VALUES (1, 2)) +// => +// (VALUES (1, 3)) func (c *CustomFuncs) MergeProjectWithValues( projections memo.ProjectionsExpr, passthrough opt.ColSet, input memo.RelExpr, ) memo.RelExpr { @@ -103,8 +102,8 @@ func (c *CustomFuncs) MergeProjectWithValues( // CanUnnestTuplesFromValues returns true if the given single-column Values // operator has tuples that can be unfolded into multiple columns. // This is the case if: -// 1. The single output column is of type tuple. -// 2. All tuples in the single column are either TupleExpr's or ConstExpr's +// 1. The single output column is of type tuple. +// 2. All tuples in the single column are either TupleExpr's or ConstExpr's // that wrap DTuples, as opposed to dynamically generated tuples. func (c *CustomFuncs) CanUnnestTuplesFromValues(values *memo.ValuesExpr) bool { colTypeFam := c.mem.Metadata().ColumnMeta(values.Cols[0]).Type.Family() @@ -177,14 +176,13 @@ func (c *CustomFuncs) MakeColsForUnnestTuples(tupleColID opt.ColumnID) opt.ColLi // Values operator with the tuple expanded out into the Values rows. // For example, these rows: // -// ((1, 2),) -// ((3, 4),) +// ((1, 2),) +// ((3, 4),) // // would be unnested as: // -// (1, 2) -// (3, 4) -// +// (1, 2) +// (3, 4) func (c *CustomFuncs) UnnestTuplesFromValues( values *memo.ValuesExpr, valuesCols opt.ColList, ) memo.RelExpr { @@ -258,6 +256,7 @@ func (c *CustomFuncs) FoldTupleColumnAccess( // within the JSON object can be referenced. // 2. All JSON keys referenced by the projections are present in the first row. // 3. All JSON keys present in the first row are present in all other rows. +// // CanUnnestJSONFromValues should only be called if the Values operator has a // single column and at least one row. // @@ -452,10 +451,11 @@ func (c *CustomFuncs) UnnestJSONFromValues( // replaced by new Variables wrapping columns from the output of the given // ValuesExpr. Example: // -// SELECT j->'a' AS j_a FROM ... +// SELECT j->'a' AS j_a FROM ... +// // => -// SELECT j_a FROM ... // +// SELECT j_a FROM ... func (c *CustomFuncs) FoldJSONFieldAccess( projections memo.ProjectionsExpr, newCols opt.ColList, @@ -535,7 +535,6 @@ func (c *CustomFuncs) MakeColsForUnnestJSON( // 1. The ProjectionsItem remaps an output column from the given ValuesExpr. // // 2. The Values output column being remapped is not in the passthrough set. -// func (c *CustomFuncs) CanPushColumnRemappingIntoValues( projections memo.ProjectionsExpr, passthrough opt.ColSet, values *memo.ValuesExpr, ) bool { @@ -557,22 +556,25 @@ func (c *CustomFuncs) CanPushColumnRemappingIntoValues( // // Example: // project -// ├── columns: x:2!null -// ├── values -// │ ├── columns: column1:1!null -// │ ├── cardinality: [2 - 2] -// │ ├── (1,) -// │ └── (2,) -// └── projections -// └── column1:1 [as=x:2, outer=(1)] +// +// ├── columns: x:2!null +// ├── values +// │ ├── columns: column1:1!null +// │ ├── cardinality: [2 - 2] +// │ ├── (1,) +// │ └── (2,) +// └── projections +// └── column1:1 [as=x:2, outer=(1)] +// // => // project -// ├── columns: x:2!null -// └── values -// ├── columns: x:2!null -// ├── cardinality: [2 - 2] -// ├── (1,) -// └── (2,) +// +// ├── columns: x:2!null +// └── values +// ├── columns: x:2!null +// ├── cardinality: [2 - 2] +// ├── (1,) +// └── (2,) // // This allows other rules to fire. In the above example, EliminateProject can // now remove the Project altogether. 
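The column-remapping pushdown above is ultimately just column-ID bookkeeping: the Values node is relabeled to produce the Project's output column directly, after which the Project is a pure pass-through. The toy sketch below models only that relabeling step; valuesExpr and pushColumnRemapping are invented stand-ins, not memo.ValuesExpr or the rule implementation.

package main

import "fmt"

// valuesExpr is a toy stand-in for memo.ValuesExpr: it only tracks output
// column IDs and constant rows.
type valuesExpr struct {
	cols []int
	rows [][]int
}

// pushColumnRemapping relabels the Values output column src as dst, mirroring
// the rewrite shown above: once Values produces x:2 directly, the wrapping
// Project no longer synthesizes anything and can be removed by other rules.
func pushColumnRemapping(v *valuesExpr, src, dst int) {
	for i, c := range v.cols {
		if c == src {
			v.cols[i] = dst
		}
	}
}

func main() {
	v := &valuesExpr{cols: []int{1}, rows: [][]int{{1}, {2}}}
	pushColumnRemapping(v, 1 /* column1:1 */, 2 /* x:2 */)
	fmt.Println(v.cols) // [2]
}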
@@ -696,30 +698,33 @@ func (c *CustomFuncs) AssignmentCastCols(projections memo.ProjectionsExpr) opt.C // Example: // // project -// ├── columns: x:2 y:3 -// ├── values -// │ ├── columns: column1:1 -// │ ├── cardinality: [2 - 2] -// │ ├── (1,) -// │ └── (2,) -// └── projections -// ├── assignment-cast: STRING [as=x:2] -// │ └── column1:1 -// └── 'foo' [as=y:3] +// +// ├── columns: x:2 y:3 +// ├── values +// │ ├── columns: column1:1 +// │ ├── cardinality: [2 - 2] +// │ ├── (1,) +// │ └── (2,) +// └── projections +// ├── assignment-cast: STRING [as=x:2] +// │ └── column1:1 +// └── 'foo' [as=y:3] +// // => // project -// ├── columns: x:2 y:3 -// ├── values -// │ ├── columns: x:2 -// │ ├── cardinality: [2 - 2] -// │ ├── tuple -// │ │ └── assignment-cast: STRING -// │ │ └── 1 -// │ └── tuple -// │ └── assignment-cast: STRING -// │ └── 2 -// └── projections -// └── 'foo' [as=y:3] +// +// ├── columns: x:2 y:3 +// ├── values +// │ ├── columns: x:2 +// │ ├── cardinality: [2 - 2] +// │ ├── tuple +// │ │ └── assignment-cast: STRING +// │ │ └── 1 +// │ └── tuple +// │ └── assignment-cast: STRING +// │ └── 2 +// └── projections +// └── 'foo' [as=y:3] // // This allows other rules to fire, with the ultimate goal of eliminating the // project so that the insert fast-path optimization is used in more cases and @@ -822,13 +827,13 @@ func extractAssignmentCastInputColAndTargetType( // ConstExpr wrapping a DTuple. Expressions within a static tuple can be // determined during planning: // -// (1, 2) -// (x, y) +// (1, 2) +// (x, y) // // By contrast, expressions within a dynamic tuple can only be determined at // run-time: // -// SELECT (SELECT (x, y) FROM xy) +// SELECT (SELECT (x, y) FROM xy) // // Here, if there are 0 rows in xy, the tuple value will be NULL. Or, if there // is more than one row in xy, a dynamic error will be raised. diff --git a/pkg/sql/opt/norm/project_set_funcs.go b/pkg/sql/opt/norm/project_set_funcs.go index a0aa56baaaa2..ed697195bffd 100644 --- a/pkg/sql/opt/norm/project_set_funcs.go +++ b/pkg/sql/opt/norm/project_set_funcs.go @@ -173,13 +173,13 @@ func (c *CustomFuncs) ConstructValuesFromZips(zip memo.ZipExpr) memo.RelExpr { // wraps a DArray or an ArrayExpr. The complete set of expressions within a // static array can be determined during planning: // -// ARRAY[1,2] -// ARRAY[x,y] +// ARRAY[1,2] +// ARRAY[x,y] // // By contrast, expressions within a dynamic array can only be determined at // run-time: // -// SELECT (SELECT array_agg(x) FROM xy) +// SELECT (SELECT array_agg(x) FROM xy) // // Here, the length of the array is only known at run-time. func (c *CustomFuncs) IsStaticArray(scalar opt.ScalarExpr) bool { diff --git a/pkg/sql/opt/norm/reject_nulls_funcs.go b/pkg/sql/opt/norm/reject_nulls_funcs.go index 45309bed9c2c..69a143d544d6 100644 --- a/pkg/sql/opt/norm/reject_nulls_funcs.go +++ b/pkg/sql/opt/norm/reject_nulls_funcs.go @@ -65,10 +65,10 @@ func (c *CustomFuncs) NullRejectAggVar( // of the first projection that is referenced by nullRejectCols. A column is // only null-rejected if: // -// 1. It is in the RejectNullCols ColSet of the input expression (null -// rejection has been requested) +// 1. It is in the RejectNullCols ColSet of the input expression (null +// rejection has been requested) // -// 2. A NULL in the column implies that the projection will also be NULL. +// 2. A NULL in the column implies that the projection will also be NULL. // // NullRejectProjections panics if no such projection is found. 
func (c *CustomFuncs) NullRejectProjections( @@ -213,19 +213,12 @@ func DeriveRejectNullCols(in memo.RelExpr, disabledRules util.FastIntSet) opt.Co // eligible for null rejection. If an aggregate input column has requested null // rejection, then pass along its request if the following criteria are met: // -// 1. The aggregate function ignores null values, meaning that its value -// would not change if input null values are filtered. -// -// 2. The aggregate function returns null if its input is empty. And since -// by #1, the presence of nulls does not alter the result, the aggregate -// function would return null if its input contains only null values. -// -// 3. No other input columns are referenced by other aggregate functions in -// the GroupBy (all functions must refer to the same column), with the -// possible exception of ConstAgg. A ConstAgg aggregate can be safely -// ignored because all rows in each group must have the same value for this -// column, so it doesn't matter which rows are filtered. +// 1. The aggregate function ignores null values, meaning that its value +// would not change if input null values are filtered. // +// 2. The aggregate function returns null if its input is empty. And since +// by #1, the presence of nulls does not alter the result, the aggregate +// function would return null if its input contains only null values. func deriveGroupByRejectNullCols(in memo.RelExpr, disabledRules util.FastIntSet) opt.ColSet { input := in.Child(0).(memo.RelExpr) aggs := *in.Child(1).(*memo.AggregationsExpr) @@ -303,16 +296,8 @@ func (c *CustomFuncs) MakeNullRejectFilters(nullRejectCols opt.ColSet) memo.Filt // RejectNullCols set of the input can be null-rejected. In addition, projected // columns can also be null-rejected when: // -// 1. The projection "transmits" nulls - it returns NULL when one or more of -// its inputs is NULL. -// -// 2. One or more of the projection's input columns are in the RejectNullCols -// ColSet of the input expression. Note that this condition is not strictly -// necessary in order for a null-rejecting filter to be pushed down, but it -// ensures that filters are only pushed down when they are requested by a -// child operator (for example, an outer join that may be simplified). This -// prevents filters from getting in the way of other rules. -// +// 1. The projection "transmits" nulls - it returns NULL when one or more of +// its inputs is NULL. func deriveProjectRejectNullCols(in memo.RelExpr, disabledRules util.FastIntSet) opt.ColSet { rejectNullCols := DeriveRejectNullCols(in.Child(0).(memo.RelExpr), disabledRules) projections := *in.Child(1).(*memo.ProjectionsExpr) diff --git a/pkg/sql/opt/norm/window_funcs.go b/pkg/sql/opt/norm/window_funcs.go index 63be7248e5f9..fddf31323b86 100644 --- a/pkg/sql/opt/norm/window_funcs.go +++ b/pkg/sql/opt/norm/window_funcs.go @@ -54,17 +54,18 @@ func (c *CustomFuncs) MakeSegmentedOrdering( // precisely the property that lets us push limit operators below window // functions: // -// (Limit (Window $input) n) = (Window (Limit $input n)) +// (Limit (Window $input) n) = (Window (Limit $input n)) // // Note that the frame affects whether a given window function is prefix-safe or not. // rank() is prefix-safe under any frame, but avg(): -// * is not prefix-safe under RANGE BETWEEN UNBOUNDED PRECEDING TO CURRENT ROW -// (the default), because we might cut off mid-peer group. If we can -// guarantee that the ordering is over a key, then this becomes safe. 
-// * is not prefix-safe under ROWS BETWEEN UNBOUNDED PRECEDING TO UNBOUNDED -// FOLLOWING, because it needs to look at the entire partition. -// * is prefix-safe under ROWS BETWEEN UNBOUNDED PRECEDING TO CURRENT ROW, -// because it only needs to look at the rows up to any given row. +// - is not prefix-safe under RANGE BETWEEN UNBOUNDED PRECEDING TO CURRENT ROW +// (the default), because we might cut off mid-peer group. If we can +// guarantee that the ordering is over a key, then this becomes safe. +// - is not prefix-safe under ROWS BETWEEN UNBOUNDED PRECEDING TO UNBOUNDED +// FOLLOWING, because it needs to look at the entire partition. +// - is prefix-safe under ROWS BETWEEN UNBOUNDED PRECEDING TO CURRENT ROW, +// because it only needs to look at the rows up to any given row. +// // (We don't currently handle this case). // // This function is best-effort. It's OK to report a function not as diff --git a/pkg/sql/opt/norm/with_funcs.go b/pkg/sql/opt/norm/with_funcs.go index 7ec5a46a8615..06e6b583df47 100644 --- a/pkg/sql/opt/norm/with_funcs.go +++ b/pkg/sql/opt/norm/with_funcs.go @@ -17,9 +17,9 @@ import ( // CanInlineWith returns whether or not it's valid to inline binding in expr. // This is the case when materialize is explicitly set to false, or when: -// 1. binding has no volatile expressions (because once it's inlined, there's no -// guarantee it will be executed fully), and -// 2. binding is referenced at most once in expr. +// 1. binding has no volatile expressions (because once it's inlined, there's no +// guarantee it will be executed fully), and +// 2. binding is referenced at most once in expr. func (c *CustomFuncs) CanInlineWith(binding, expr memo.RelExpr, private *memo.WithPrivate) bool { // If materialization is set, ignore the checks below. if private.Mtr.Set { diff --git a/pkg/sql/opt/opbench/config.go b/pkg/sql/opt/opbench/config.go index e64494afb772..5a5870e73def 100644 --- a/pkg/sql/opt/opbench/config.go +++ b/pkg/sql/opt/opbench/config.go @@ -91,7 +91,7 @@ func (it *ConfigIterator) Next() (Configuration, bool) { // value for each "slot". So if the first option has 2 choices and the second // has 3, the increment process goes like: // -// [0 0] => [1 0] => [0 1] => [1 1] => [0 2] => [1 2] => done. +// [0 0] => [1 0] => [0 1] => [1 1] => [0 2] => [1 2] => done. func (it *ConfigIterator) increment() { i := 0 for i < len(it.options) { diff --git a/pkg/sql/opt/operator.go b/pkg/sql/opt/operator.go index 3b053a126f91..7e865e521e23 100644 --- a/pkg/sql/opt/operator.go +++ b/pkg/sql/opt/operator.go @@ -57,13 +57,12 @@ func (op Operator) SyntaxTag() string { // or more children, and an optional private value. The entire tree can be // easily visited using a pattern like this: // -// var visit func(e Expr) -// visit := func(e Expr) { -// for i, n := 0, e.ChildCount(); i < n; i++ { -// visit(e.Child(i)) -// } -// } -// +// var visit func(e Expr) +// visit := func(e Expr) { +// for i, n := 0, e.ChildCount(); i < n; i++ { +// visit(e.Child(i)) +// } +// } type Expr interface { // Op returns the operator type of the expression. Op() Operator @@ -305,15 +304,15 @@ func BoolOperatorRequiresNotNullArgs(op Operator) bool { // rows where its first argument evaluates to NULL. In other words, it always // evaluates to the same result even if those rows are filtered. 
For example: // -// SELECT string_agg(x, y) -// FROM (VALUES ('foo', ','), ('bar', ','), (NULL, ',')) t(x, y) +// SELECT string_agg(x, y) +// FROM (VALUES ('foo', ','), ('bar', ','), (NULL, ',')) t(x, y) // // In this example, the NULL row can be removed from the input, and the // string_agg function still returns the same result. Contrast this to the // array_agg function: // -// SELECT array_agg(x) -// FROM (VALUES ('foo'), (NULL), ('bar')) t(x) +// SELECT array_agg(x) +// FROM (VALUES ('foo'), (NULL), ('bar')) t(x) // // If the NULL row is removed here, array_agg returns {foo,bar} instead of // {foo,NULL,bar}. @@ -409,11 +408,11 @@ func AggregateIsNeverNull(op Operator) bool { // words, the inner-outer aggregate pair forms a valid "decomposition" of a // single aggregate. For example, the following pairs of queries are equivalent: // -// SELECT sum(s) FROM (SELECT sum(y) FROM xy GROUP BY x) AS f(s); -// SELECT sum(y) FROM xy; +// SELECT sum(s) FROM (SELECT sum(y) FROM xy GROUP BY x) AS f(s); +// SELECT sum(y) FROM xy; // -// SELECT sum_int(c) FROM (SELECT count(y) FROM xy GROUP BY x) AS f(c); -// SELECT count(y) FROM xy; +// SELECT sum_int(c) FROM (SELECT count(y) FROM xy GROUP BY x) AS f(c); +// SELECT count(y) FROM xy; // // Note: some aggregates like StringAggOp are decomposable in theory, but in // practice can not be easily merged as in the examples above. diff --git a/pkg/sql/opt/optbuilder/arbiter_set.go b/pkg/sql/opt/optbuilder/arbiter_set.go index f0a7575cf05d..b738b87d7c68 100644 --- a/pkg/sql/opt/optbuilder/arbiter_set.go +++ b/pkg/sql/opt/optbuilder/arbiter_set.go @@ -105,7 +105,6 @@ func (a *arbiterSet) ContainsUniqueConstraint(uniq cat.UniqueOrdinal) bool { // pred is nil. // - canaryOrd is the table column ordinal of a not-null column in the // constraint's table. -// func (a *arbiterSet) ForEach( f func(name string, conflictOrds util.FastIntSet, pred tree.Expr, canaryOrd int), ) { @@ -150,14 +149,14 @@ func (a *arbiterSet) removeIndex(idx cat.IndexOrdinal) { // unique constraints. It is only useful when an ON CONFLICT statement specifies // no columns or constraints. For example, consider the table and statement: // -// CREATE TABLE t ( -// a INT, -// b INT, -// UNIQUE INDEX a_b_key (a, b), -// UNIQUE WITHOUT INDEX b_key (b) -// ) +// CREATE TABLE t ( +// a INT, +// b INT, +// UNIQUE INDEX a_b_key (a, b), +// UNIQUE WITHOUT INDEX b_key (b) +// ) // -// INSERT INTO t VALUES (1, 2) ON CONFLICT DO NOTHING +// INSERT INTO t VALUES (1, 2) ON CONFLICT DO NOTHING // // There is no need to use both a_b_key and b_key as arbiters for the INSERT // statement because any conflict in a_b_key will also be a conflict in b_key. @@ -230,10 +229,9 @@ func (m *minArbiterSet) ArbiterSet() arbiterSet { // the unique constraint. An arbiter index is redundant if both of the following // hold: // -// 1. Its conflict columns are a super set of the given conflict columns. -// 2. The index and unique constraint are both non-partial, or have the same -// partial predicate. -// +// 1. Its conflict columns are a super set of the given conflict columns. +// 2. The index and unique constraint are both non-partial, or have the same +// partial predicate. 
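For illustration, a hypothetical schema (not taken from this patch) where criterion 2 holds because the index and the unique constraint share the same partial predicate, making the index redundant as an arbiter; this sketch assumes partial UNIQUE WITHOUT INDEX constraints are enabled.

    -- a_b_idx is redundant: its columns {a, b} are a superset of b_uniq's {b}
    -- and both share the predicate c > 0, so b_uniq alone can arbitrate.
    CREATE TABLE t (
        a INT,
        b INT,
        c INT,
        UNIQUE INDEX a_b_idx (a, b) WHERE c > 0,
        UNIQUE WITHOUT INDEX b_uniq (b) WHERE c > 0
    );
    INSERT INTO t VALUES (1, 2, 3) ON CONFLICT DO NOTHING;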
func (m *minArbiterSet) findRedundantIndex( uniq cat.UniqueConstraint, ) (_ cat.IndexOrdinal, ok bool) { diff --git a/pkg/sql/opt/optbuilder/builder.go b/pkg/sql/opt/optbuilder/builder.go index b8214666e992..8b47e01aacb3 100644 --- a/pkg/sql/opt/optbuilder/builder.go +++ b/pkg/sql/opt/optbuilder/builder.go @@ -260,18 +260,21 @@ func (b *Builder) buildStmtAtRoot(stmt tree.Statement, desiredTypes []*types.T) // statement. // // NOTE: The following descriptions of the inScope parameter and outScope -// return value apply for all buildXXX() functions in this directory. -// Note that some buildXXX() functions pass outScope as a parameter -// rather than a return value so its scopeColumns can be built up -// incrementally across several function calls. +// +// return value apply for all buildXXX() functions in this directory. +// Note that some buildXXX() functions pass outScope as a parameter +// rather than a return value so its scopeColumns can be built up +// incrementally across several function calls. // // inScope This parameter contains the name bindings that are visible for this -// statement/expression (e.g., passed in from an enclosing statement). +// +// statement/expression (e.g., passed in from an enclosing statement). // // outScope This return value contains the newly bound variables that will be -// visible to enclosing statements, as well as a pointer to any -// "parent" scope that is still visible. The top-level memo expression -// for the built statement/expression is returned in outScope.expr. +// +// visible to enclosing statements, as well as a pointer to any +// "parent" scope that is still visible. The top-level memo expression +// for the built statement/expression is returned in outScope.expr. func (b *Builder) buildStmt( stmt tree.Statement, desiredTypes []*types.T, inScope *scope, ) (outScope *scope) { diff --git a/pkg/sql/opt/optbuilder/builder_test.go b/pkg/sql/opt/optbuilder/builder_test.go index c13c41b2c803..74ba63b4ec4f 100644 --- a/pkg/sql/opt/optbuilder/builder_test.go +++ b/pkg/sql/opt/optbuilder/builder_test.go @@ -33,25 +33,25 @@ import ( ) // TestBuilder runs data-driven testcases of the form -// []... -// -// ---- -// +// +// []... +// +// ---- +// // // See OptTester.Handle for supported commands. In addition to those, we // support: // -// - build-scalar [args] -// -// Builds a memo structure from a SQL scalar expression and outputs a -// representation of the "expression view" of the memo structure. +// - build-scalar [args] // -// The supported args (in addition to the ones supported by OptTester): +// Builds a memo structure from a SQL scalar expression and outputs a +// representation of the "expression view" of the memo structure. // -// - vars=(var1 type1, var2 type2,...) +// The supported args (in addition to the ones supported by OptTester): // -// Information about columns that the scalar expression can refer to. +// - vars=(var1 type1, var2 type2,...) // +// Information about columns that the scalar expression can refer to. 
func TestBuilder(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/opt/optbuilder/fk_cascade.go b/pkg/sql/opt/optbuilder/fk_cascade.go index 2036c1b4133b..ebf0d25c04c9 100644 --- a/pkg/sql/opt/optbuilder/fk_cascade.go +++ b/pkg/sql/opt/optbuilder/fk_cascade.go @@ -33,28 +33,27 @@ import ( // It provides a method to build the cascading delete in the child table, // equivalent to a query like: // -// DELETE FROM child WHERE fk IN (SELECT fk FROM original_mutation_input) +// DELETE FROM child WHERE fk IN (SELECT fk FROM original_mutation_input) // // The input to the mutation is a semi-join of the table with the mutation // input: // -// delete child -// └── semi-join (hash) -// ├── columns: c:5!null child.p:6!null -// ├── scan child -// │ └── columns: c:5!null child.p:6!null -// ├── with-scan &1 -// │ ├── columns: p:7!null -// │ └── mapping: -// │ └── parent.p:2 => p:7 -// └── filters -// └── child.p:6 = p:7 +// delete child +// └── semi-join (hash) +// ├── columns: c:5!null child.p:6!null +// ├── scan child +// │ └── columns: c:5!null child.p:6!null +// ├── with-scan &1 +// │ ├── columns: p:7!null +// │ └── mapping: +// │ └── parent.p:2 => p:7 +// └── filters +// └── child.p:6 = p:7 // // Note that NULL values in the mutation input don't require any special // handling - they will be effectively ignored by the semi-join. // // See testdata/fk-on-delete-cascades for more examples. -// type onDeleteCascadeBuilder struct { mutatedTable cat.Table // fkInboundOrdinal is the ordinal of the inbound foreign key constraint on @@ -121,23 +120,23 @@ func (cb *onDeleteCascadeBuilder) Build( // It provides a method to build the cascading delete in the child table, // equivalent to a query like: // -// DELETE FROM child WHERE AND fk IS NOT NULL +// DELETE FROM child WHERE AND fk IS NOT NULL // // The input to the mutation is a Select on top of a Scan. For example: // // ── delete child -// ├── columns: -// ├── fetch columns: c:8 child.p:9 -// └── select -// ├── columns: c:8!null child.p:9!null -// ├── scan child -// │ └── columns: c:8!null child.p:9!null -// └── filters -// ├── child.p:9 > 1 -// └── child.p:9 IS DISTINCT FROM CAST(NULL AS INT8) // -// See testdata/fk-on-delete-cascades for more examples. +// ├── columns: +// ├── fetch columns: c:8 child.p:9 +// └── select +// ├── columns: c:8!null child.p:9!null +// ├── scan child +// │ └── columns: c:8!null child.p:9!null +// └── filters +// ├── child.p:9 > 1 +// └── child.p:9 IS DISTINCT FROM CAST(NULL AS INT8) // +// See testdata/fk-on-delete-cascades for more examples. 
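The plan fragments above assume a parent/child schema roughly like the following sketch (names are illustrative, not part of the patch); deleting parent rows drives the cascading DELETE on child, or the fast variant when the original filter can be applied to child directly.

    CREATE TABLE parent (p INT PRIMARY KEY);
    CREATE TABLE child (
        c INT PRIMARY KEY,
        p INT REFERENCES parent (p) ON DELETE CASCADE
    );
    -- Triggers the cascading delete built by onDeleteCascadeBuilder, or the
    -- fast path when the filter p > 1 can be re-applied to child.
    DELETE FROM parent WHERE p > 1;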
type onDeleteFastCascadeBuilder struct { mutatedTable cat.Table // fkInboundOrdinal is the ordinal of the inbound foreign key constraint on @@ -360,39 +359,38 @@ func (cb *onDeleteFastCascadeBuilder) Build( // It provides a method to build the cascading delete in the child table, // equivalent to a query like: // -// UPDATE SET fk = NULL FROM child WHERE fk IN (SELECT fk FROM original_mutation_input) -// or -// UPDATE SET fk = DEFAULT FROM child WHERE fk IN (SELECT fk FROM original_mutation_input) +// UPDATE SET fk = NULL FROM child WHERE fk IN (SELECT fk FROM original_mutation_input) +// or +// UPDATE SET fk = DEFAULT FROM child WHERE fk IN (SELECT fk FROM original_mutation_input) // // The input to the mutation is a semi-join of the table with the mutation // input: // -// update child -// ├── columns: -// ├── fetch columns: c:5 child.p:6 -// ├── update-mapping: -// │ └── column8:8 => child.p:4 -// └── project -// ├── columns: column8:8 c:5!null child.p:6 -// ├── semi-join (hash) -// │ ├── columns: c:5!null child.p:6 -// │ ├── scan child -// │ │ └── columns: c:5!null child.p:6 -// │ ├── with-scan &1 -// │ │ ├── columns: p:7!null -// │ │ └── mapping: -// │ │ └── parent.p:2 => p:7 -// │ └── filters -// │ └── child.p:6 = p:7 -// └── projections -// └── NULL::INT8 [as=column8:8] +// update child +// ├── columns: +// ├── fetch columns: c:5 child.p:6 +// ├── update-mapping: +// │ └── column8:8 => child.p:4 +// └── project +// ├── columns: column8:8 c:5!null child.p:6 +// ├── semi-join (hash) +// │ ├── columns: c:5!null child.p:6 +// │ ├── scan child +// │ │ └── columns: c:5!null child.p:6 +// │ ├── with-scan &1 +// │ │ ├── columns: p:7!null +// │ │ └── mapping: +// │ │ └── parent.p:2 => p:7 +// │ └── filters +// │ └── child.p:6 = p:7 +// └── projections +// └── NULL::INT8 [as=column8:8] // // Note that NULL values in the mutation input don't require any special // handling - they will be effectively ignored by the semi-join. // // See testdata/fk-on-delete-set-null and fk-on-delete-set-default for more // examples. -// type onDeleteSetBuilder struct { mutatedTable cat.Table // fkInboundOrdinal is the ordinal of the inbound foreign key constraint on @@ -490,20 +488,19 @@ func (cb *onDeleteSetBuilder) Build( // For example, if we have a child table with foreign key on p, the expression // will look like this: // -// semi-join (hash) -// ├── columns: c:5!null child.p:6!null -// ├── scan child -// │ └── columns: c:5!null child.p:6!null -// ├── with-scan &1 -// │ ├── columns: p:7!null -// │ └── mapping: -// │ └── parent.p:2 => p:7 -// └── filters -// └── child.p:6 = p:7 +// semi-join (hash) +// ├── columns: c:5!null child.p:6!null +// ├── scan child +// │ └── columns: c:5!null child.p:6!null +// ├── with-scan &1 +// │ ├── columns: p:7!null +// │ └── mapping: +// │ └── parent.p:2 => p:7 +// └── filters +// └── child.p:6 = p:7 // // Note that NULL values in the mutation input don't require any special // handling - they will be effectively ignored by the semi-join. 
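The same semi-join input described above also feeds onDeleteSetBuilder; a minimal sketch of a schema that exercises it (illustrative names; SET DEFAULT works analogously):

    CREATE TABLE parent (p INT PRIMARY KEY);
    CREATE TABLE child (
        c INT PRIMARY KEY,
        p INT REFERENCES parent (p) ON DELETE SET NULL
    );
    -- The cascade is an UPDATE on child that overwrites p with NULL, as in the
    -- NULL::INT8 projection shown in the plan above.
    DELETE FROM parent WHERE p = 2;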
-// func (b *Builder) buildDeleteCascadeMutationInput( childTable cat.Table, childTableAlias *tree.TableName, @@ -571,34 +568,34 @@ func (b *Builder) buildDeleteCascadeMutationInput( // It provides a method to build the cascading update in the child table, // equivalent to a query like: // -// UPDATE child SET fk = fk_new_val -// FROM (SELECT fk_old_val, fk_new_val FROM original_mutation_input) -// WHERE fk_old_val IS DISTINCT FROM fk_new_val AND fk = fk_old_val +// UPDATE child SET fk = fk_new_val +// FROM (SELECT fk_old_val, fk_new_val FROM original_mutation_input) +// WHERE fk_old_val IS DISTINCT FROM fk_new_val AND fk = fk_old_val // // The input to the mutation is an inner-join of the table with the mutation // input, producing the old and new FK values for each row: // -// update child -// ├── columns: -// ├── fetch columns: c:6 child.p:7 -// ├── update-mapping: -// │ └── p_new:9 => child.p:5 -// ├── input binding: &2 -// └─── inner-join (hash) -// ├── columns: c:6!null child.p:7!null p:8!null p_new:9!null -// ├── scan child -// │ └── columns: c:6!null child.p:7!null -// ├── select -// │ ├── columns: p:8!null p_new:9!null -// │ ├── with-scan &1 -// │ │ ├── columns: p:8!null p_new:9!null -// │ │ └── mapping: -// │ │ ├── parent.p:2 => p:8 -// │ │ └── p_new:3 => p_new:9 -// │ └── filters -// │ └── p:8 IS DISTINCT FROM p_new:9 -// └── filters -// └── child.p:7 = p:8 +// update child +// ├── columns: +// ├── fetch columns: c:6 child.p:7 +// ├── update-mapping: +// │ └── p_new:9 => child.p:5 +// ├── input binding: &2 +// └─── inner-join (hash) +// ├── columns: c:6!null child.p:7!null p:8!null p_new:9!null +// ├── scan child +// │ └── columns: c:6!null child.p:7!null +// ├── select +// │ ├── columns: p:8!null p_new:9!null +// │ ├── with-scan &1 +// │ │ ├── columns: p:8!null p_new:9!null +// │ │ └── mapping: +// │ │ ├── parent.p:2 => p:8 +// │ │ └── p_new:3 => p_new:9 +// │ └── filters +// │ └── p:8 IS DISTINCT FROM p_new:9 +// └── filters +// └── child.p:7 = p:8 // // The inner join equality columns form a key in the with-scan (because they // form a key in the parent table); so the inner-join is essentially equivalent @@ -608,7 +605,6 @@ func (b *Builder) buildDeleteCascadeMutationInput( // handling - they will be effectively ignored by the join. // // See testdata/fk-on-update-* for more examples. 
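A minimal sketch of a schema that produces the ON UPDATE CASCADE plan shown above (illustrative names, not part of the patch):

    CREATE TABLE parent (p INT PRIMARY KEY);
    CREATE TABLE child (
        c INT PRIMARY KEY,
        p INT REFERENCES parent (p) ON UPDATE CASCADE
    );
    -- Joins the old and new parent key values (p and p_new above) and updates
    -- the matching child rows.
    UPDATE parent SET p = p + 10 WHERE p = 1;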
-// type onUpdateCascadeBuilder struct { mutatedTable cat.Table // fkInboundOrdinal is the ordinal of the inbound foreign key constraint on @@ -703,21 +699,21 @@ func (cb *onUpdateCascadeBuilder) Build( // For example, if we have a child table with foreign key on p, the expression // will look like this: // -// inner-join (hash) -// ├── columns: c:6!null child.p:7!null p:8!null p_new:9!null -// ├── scan child -// │ └── columns: c:6!null child.p:7!null -// ├── select -// │ ├── columns: p:8!null p_new:9!null -// │ ├── with-scan &1 -// │ │ ├── columns: p:8!null p_new:9!null -// │ │ └── mapping: -// │ │ ├── parent.p:2 => p:8 -// │ │ └── p_new:3 => p_new:9 -// │ └── filters -// │ └── p:8 IS DISTINCT FROM p_new:9 -// └── filters -// └── child.p:7 = p:8 +// inner-join (hash) +// ├── columns: c:6!null child.p:7!null p:8!null p_new:9!null +// ├── scan child +// │ └── columns: c:6!null child.p:7!null +// ├── select +// │ ├── columns: p:8!null p_new:9!null +// │ ├── with-scan &1 +// │ │ ├── columns: p:8!null p_new:9!null +// │ │ └── mapping: +// │ │ ├── parent.p:2 => p:8 +// │ │ └── p_new:3 => p_new:9 +// │ └── filters +// │ └── p:8 IS DISTINCT FROM p_new:9 +// └── filters +// └── child.p:7 = p:8 // // The inner join equality columns form a key in the with-scan (because they // form a key in the parent table); so the inner-join is essentially equivalent @@ -739,7 +735,6 @@ func (cb *onUpdateCascadeBuilder) Build( // inserted rows the "old" values are all NULL and won't match anything in the // inner-join anyway. This reasoning is very similar to that of FK checks for // Upserts (see buildFKChecksForUpsert). -// func (b *Builder) buildUpdateCascadeMutationInput( childTable cat.Table, childTableAlias *tree.TableName, diff --git a/pkg/sql/opt/optbuilder/groupby.go b/pkg/sql/opt/optbuilder/groupby.go index f2533bf2c510..a4dcf96f2333 100644 --- a/pkg/sql/opt/optbuilder/groupby.go +++ b/pkg/sql/opt/optbuilder/groupby.go @@ -108,12 +108,11 @@ type groupby struct { // expression in the SELECT list must be a GROUP BY expression or be composed // of GROUP BY expressions. For example, this query is legal: // -// SELECT COUNT(*), k + v FROM kv GROUP by k, v +// SELECT COUNT(*), k + v FROM kv GROUP by k, v // // but this query is not: // -// SELECT COUNT(*), k + v FROM kv GROUP BY k - v -// +// SELECT COUNT(*), k + v FROM kv GROUP BY k - v type groupByStrSet map[string]*scopeColumn // hasNonCommutativeAggregates checks whether any of the aggregates are @@ -485,9 +484,11 @@ func (b *Builder) buildHaving(having tree.TypedExpr, fromScope *scope) opt.Scala // // groupBy The given GROUP BY expressions. // selects The select expressions are needed in case one of the GROUP BY -// expressions is an index into to the select list. For example, -// SELECT count(*), k FROM t GROUP BY 2 -// indicates that the grouping is on the second select expression, k. +// +// expressions is an index into to the select list. For example, +// SELECT count(*), k FROM t GROUP BY 2 +// indicates that the grouping is on the second select expression, k. +// // fromScope The scope for the input to the aggregation (the FROM clause). func (b *Builder) buildGroupingList( groupBy tree.GroupBy, selects tree.SelectExprs, projectionsScope *scope, fromScope *scope, @@ -517,16 +518,22 @@ func (b *Builder) buildGroupingList( // expression. The expression (or expressions, if we have a star) is added to // groupStrs and to the aggInScope. // -// // groupBy The given GROUP BY expression. 
// selects The select expressions are needed in case the GROUP BY -// expression is an index into to the select list. +// +// expression is an index into to the select list. +// // projectionsScope The scope that contains the columns for the SELECT targets -// (used when GROUP BY refers to a target by alias). +// +// (used when GROUP BY refers to a target by alias). +// // fromScope The scope for the input to the aggregation (the FROM -// clause). +// +// clause). +// // aggInScope The scope that will contain the grouping expressions as well -// as the aggregate function arguments. +// +// as the aggregate function arguments. func (b *Builder) buildGrouping( groupBy tree.Expr, selects tree.SelectExprs, projectionsScope, fromScope, aggInScope *scope, ) { @@ -674,10 +681,10 @@ func translateAggName(name string) string { // aggregate function are extracted and added to aggInScope. The aggregate // function expression itself is added to aggOutScope. For example: // -// SELECT SUM(x+1) FROM xy -// => -// aggInScope : x+1 AS column1 -// aggOutScope: SUM(column1) +// SELECT SUM(x+1) FROM xy +// => +// aggInScope : x+1 AS column1 +// aggOutScope: SUM(column1) // // buildAggregateFunction returns a pointer to the aggregateInfo containing // the function definition, fully built arguments, and the aggregate output diff --git a/pkg/sql/opt/optbuilder/insert.go b/pkg/sql/opt/optbuilder/insert.go index 59aedf22836d..d5e3c1c2200e 100644 --- a/pkg/sql/opt/optbuilder/insert.go +++ b/pkg/sql/opt/optbuilder/insert.go @@ -37,7 +37,7 @@ const duplicateUpsertErrText = "UPSERT or INSERT...ON CONFLICT command cannot af // cannot be inserted due to a conflict, the "excluded" data source contains // that row, so that its columns can be referenced in the conflict clause: // -// INSERT INTO ab VALUES (1, 2) ON CONFLICT (a) DO UPDATE b=excluded.b+1 +// INSERT INTO ab VALUES (1, 2) ON CONFLICT (a) DO UPDATE b=excluded.b+1 // // It is located in the special crdb_internal schema so that it never overlaps // with user data sources. @@ -55,45 +55,45 @@ func init() { // begin, an input expression is constructed which outputs these columns to // insert into the target table: // -// 1. Columns explicitly specified by the user in SELECT or VALUES expression. +// 1. Columns explicitly specified by the user in SELECT or VALUES expression. // -// 2. Columns not specified by the user, but having a default value declared -// in schema (or being nullable). +// 2. Columns not specified by the user, but having a default value declared +// in schema (or being nullable). // -// 3. Computed columns. +// 3. Computed columns. // -// 4. Mutation columns which are being added or dropped by an online schema -// change. +// 4. Mutation columns which are being added or dropped by an online schema +// change. // // buildInsert starts by constructing the input expression, and then wraps it // with Project operators which add default, computed, and mutation columns. The // final input expression will project values for all columns in the target // table. 
For example, if this is the schema and INSERT statement: // -// CREATE TABLE abcd ( -// a INT PRIMARY KEY, -// b INT, -// c INT DEFAULT(10), -// d INT AS (b+c) STORED -// ) -// INSERT INTO abcd (a) VALUES (1) +// CREATE TABLE abcd ( +// a INT PRIMARY KEY, +// b INT, +// c INT DEFAULT(10), +// d INT AS (b+c) STORED +// ) +// INSERT INTO abcd (a) VALUES (1) // // Then an input expression equivalent to this would be built: // -// SELECT ins_a, ins_b, ins_c, ins_b + ins_c AS ins_d -// FROM (VALUES (1, NULL, 10)) AS t(ins_a, ins_b, ins_c) +// SELECT ins_a, ins_b, ins_c, ins_b + ins_c AS ins_d +// FROM (VALUES (1, NULL, 10)) AS t(ins_a, ins_b, ins_c) // // If an ON CONFLICT clause is present (or if it was an UPSERT statement), then // additional columns are added to the input expression: // -// 1. Columns containing existing values fetched from the target table and -// used to detect conflicts and to formulate the key/value update commands. +// 1. Columns containing existing values fetched from the target table and +// used to detect conflicts and to formulate the key/value update commands. // -// 2. Columns containing updated values to set when a conflict is detected, as -// specified by the user. +// 2. Columns containing updated values to set when a conflict is detected, as +// specified by the user. // -// 3. Computed columns which will be updated when a conflict is detected and -// that are dependent on one or more updated columns. +// 3. Computed columns which will be updated when a conflict is detected and +// that are dependent on one or more updated columns. // // A LEFT OUTER JOIN associates each row to insert with the corresponding // existing row (#1 above). If the row does not exist, then the existing columns @@ -129,26 +129,26 @@ func init() { // Putting it all together, if this is the schema and INSERT..ON CONFLICT // statement: // -// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) -// INSERT INTO abc VALUES (1, 2), (1, 3) ON CONFLICT (a) DO UPDATE SET b=10 +// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) +// INSERT INTO abc VALUES (1, 2), (1, 3) ON CONFLICT (a) DO UPDATE SET b=10 // // Then an input expression roughly equivalent to this would be built (note that // the DISTINCT ON is really the UpsertDistinctOn operator, which behaves a bit // differently than the DistinctOn operator): // -// SELECT -// fetch_a, -// fetch_b, -// fetch_c, -// CASE WHEN fetch_a IS NULL ins_a ELSE fetch_a END AS ups_a, -// CASE WHEN fetch_a IS NULL ins_b ELSE 10 END AS ups_b, -// CASE WHEN fetch_a IS NULL ins_c ELSE fetch_c END AS ups_c, -// FROM ( -// SELECT DISTINCT ON (ins_a) * -// FROM (VALUES (1, 2, NULL), (1, 3, NULL)) AS ins(ins_a, ins_b, ins_c) -// ) -// LEFT OUTER JOIN abc AS fetch(fetch_a, fetch_b, fetch_c) -// ON ins_a = fetch_a +// SELECT +// fetch_a, +// fetch_b, +// fetch_c, +// CASE WHEN fetch_a IS NULL ins_a ELSE fetch_a END AS ups_a, +// CASE WHEN fetch_a IS NULL ins_b ELSE 10 END AS ups_b, +// CASE WHEN fetch_a IS NULL ins_c ELSE fetch_c END AS ups_c, +// FROM ( +// SELECT DISTINCT ON (ins_a) * +// FROM (VALUES (1, 2, NULL), (1, 3, NULL)) AS ins(ins_a, ins_b, ins_c) +// ) +// LEFT OUTER JOIN abc AS fetch(fetch_a, fetch_b, fetch_c) +// ON ins_a = fetch_a // // Here, the fetch_a column has been designated as the canary column, since it // is NOT NULL in the schema. It is used as the CASE condition to decide between @@ -163,16 +163,16 @@ func init() { // input has no duplicates, and an ANTI JOIN to check whether a conflict exists. 
// For example: // -// CREATE TABLE ab (a INT PRIMARY KEY, b INT) -// INSERT INTO ab (a, b) VALUES (1, 2), (1, 3) ON CONFLICT DO NOTHING +// CREATE TABLE ab (a INT PRIMARY KEY, b INT) +// INSERT INTO ab (a, b) VALUES (1, 2), (1, 3) ON CONFLICT DO NOTHING // // Then an input expression roughly equivalent to this would be built: // -// SELECT x, y -// FROM (SELECT DISTINCT ON (x) * FROM (VALUES (1, 2), (1, 3))) AS input(x, y) -// WHERE NOT EXISTS( -// SELECT ab.a WHERE input.x = ab.a -// ) +// SELECT x, y +// FROM (SELECT DISTINCT ON (x) * FROM (VALUES (1, 2), (1, 3))) AS input(x, y) +// WHERE NOT EXISTS( +// SELECT ab.a WHERE input.x = ab.a +// ) // // Note that an ordered input to the INSERT does not provide any guarantee about // the order in which mutations are applied, or the order of any returned rows @@ -348,13 +348,13 @@ func (b *Builder) buildInsert(ins *tree.Insert, inScope *scope) (outScope *scope // fetch existing rows, and then the KV Put operation can be used to blindly // insert a new record or overwrite an existing record. This is possible when: // -// 1. There are no secondary indexes. Existing values are needed to delete -// secondary index rows when the update causes them to move. -// 2. There are no implicit partitioning columns in the primary index. -// 3. All non-key columns (including mutation columns) have insert and update -// values specified for them. -// 4. Each update value is the same as the corresponding insert value. -// 5. There are no inbound foreign keys containing non-key columns. +// 1. There are no secondary indexes. Existing values are needed to delete +// secondary index rows when the update causes them to move. +// 2. There are no implicit partitioning columns in the primary index. +// 3. All non-key columns (including mutation columns) have insert and update +// values specified for them. +// 4. Each update value is the same as the corresponding insert value. +// 5. There are no inbound foreign keys containing non-key columns. // // TODO(andyk): The fast path is currently only enabled when the UPSERT alias // is explicitly selected by the user. It's possible to fast path some queries @@ -461,14 +461,14 @@ func (mb *mutationBuilder) checkPrimaryKeyForInsert() { // Alternatively, all columns can be unspecified. If neither condition is true, // checkForeignKeys raises an error. Here is an example: // -// CREATE TABLE orders ( -// id INT, -// cust_id INT, -// state STRING, -// FOREIGN KEY (cust_id, state) REFERENCES customers (id, state) MATCH FULL -// ) +// CREATE TABLE orders ( +// id INT, +// cust_id INT, +// state STRING, +// FOREIGN KEY (cust_id, state) REFERENCES customers (id, state) MATCH FULL +// ) // -// INSERT INTO orders (cust_id) VALUES (1) +// INSERT INTO orders (cust_id) VALUES (1) // // This INSERT statement would trigger a static error, because only cust_id is // specified in the INSERT statement. Either the state column must be specified @@ -528,7 +528,7 @@ func (mb *mutationBuilder) checkForeignKeysForInsert() { // used when the target columns are not explicitly specified in the INSERT // statement: // -// INSERT INTO t VALUES (1, 2, 3) +// INSERT INTO t VALUES (1, 2, 3) // // In this example, the first three columns of table t would be added as target // columns. @@ -824,11 +824,11 @@ func (mb *mutationBuilder) buildInputForUpsert( // setUpsertCols sets the list of columns to be updated in case of conflict. // There are two cases to handle: // -// 1. Target columns are explicitly specified: -// UPSERT INTO abc (col1, col2, ...) +// 1. 
Target columns are explicitly specified: +// UPSERT INTO abc (col1, col2, ...) // -// 2. Target columns are implicitly derived: -// UPSERT INTO abc +// 2. Target columns are implicitly derived: +// UPSERT INTO abc // // In case #1, only the columns that were specified by the user will be updated. // In case #2, all non-mutation columns in the table will be updated. @@ -837,9 +837,9 @@ func (mb *mutationBuilder) buildInputForUpsert( // updated. This can have an impact in unusual cases where equal SQL values have // different representations. For example: // -// CREATE TABLE abc (a DECIMAL PRIMARY KEY, b DECIMAL) -// INSERT INTO abc VALUES (1, 2.0) -// UPSERT INTO abc VALUES (1.0, 2) +// CREATE TABLE abc (a DECIMAL PRIMARY KEY, b DECIMAL) +// INSERT INTO abc VALUES (1, 2.0) +// UPSERT INTO abc VALUES (1.0, 2) // // The UPSERT statement will update the value of column "b" from 2 => 2.0, but // will not modify column "a". @@ -919,16 +919,16 @@ func (mb *mutationBuilder) buildUpsert(returning tree.ReturningExprs) { // inserted into the target table, or else used to update an existing row, // depending on whether the canary column is null. For example: // -// UPSERT INTO ab VALUES (ins_a, ins_b) ON CONFLICT (a) DO UPDATE SET b=upd_b +// UPSERT INTO ab VALUES (ins_a, ins_b) ON CONFLICT (a) DO UPDATE SET b=upd_b // // will cause the columns to be projected: // -// SELECT -// fetch_a, -// fetch_b, -// CASE WHEN fetch_a IS NULL ins_a ELSE fetch_a END AS ups_a, -// CASE WHEN fetch_b IS NULL ins_b ELSE upd_b END AS ups_b, -// FROM (SELECT ins_a, ins_b, upd_b, fetch_a, fetch_b FROM ...) +// SELECT +// fetch_a, +// fetch_b, +// CASE WHEN fetch_a IS NULL ins_a ELSE fetch_a END AS ups_a, +// CASE WHEN fetch_b IS NULL ins_b ELSE upd_b END AS ups_b, +// FROM (SELECT ins_a, ins_b, upd_b, fetch_a, fetch_b FROM ...) // // For each column, a CASE expression is created that toggles between the insert // and update values depending on whether the canary column is null. These diff --git a/pkg/sql/opt/optbuilder/join.go b/pkg/sql/opt/optbuilder/join.go index be3869704f25..32423e44b00e 100644 --- a/pkg/sql/opt/optbuilder/join.go +++ b/pkg/sql/opt/optbuilder/join.go @@ -240,10 +240,11 @@ func (b *Builder) constructJoin( // // With NATURAL JOIN or JOIN USING (a,b,c,...), SQL allows us to refer to the // columns a,b,c directly; these columns have the following semantics: -// a = IFNULL(left.a, right.a) -// b = IFNULL(left.b, right.b) -// c = IFNULL(left.c, right.c) -// ... +// +// a = IFNULL(left.a, right.a) +// b = IFNULL(left.b, right.b) +// c = IFNULL(left.c, right.c) +// ... 
// // Furthermore, a star has to resolve the columns in the following order: // merged columns, non-equality columns from the left table, non-equality @@ -259,37 +260,37 @@ func (b *Builder) constructJoin( // // Example: // -// left has columns (a,b,x) -// right has columns (a,b,y) +// left has columns (a,b,x) +// right has columns (a,b,y) // -// - SELECT * FROM left JOIN right USING(a,b) +// - SELECT * FROM left JOIN right USING(a,b) // -// join has columns: -// 1: left.a -// 2: left.b -// 3: left.x -// 4: right.a -// 5: right.b -// 6: right.y +// join has columns: +// 1: left.a +// 2: left.b +// 3: left.x +// 4: right.a +// 5: right.b +// 6: right.y // -// projection has columns and corresponding variable expressions: -// 1: a aka left.a @1 -// 2: b aka left.b @2 -// 3: left.x @3 -// 4: right.a (hidden) @4 -// 5: right.b (hidden) @5 -// 6: right.y @6 +// projection has columns and corresponding variable expressions: +// 1: a aka left.a @1 +// 2: b aka left.b @2 +// 3: left.x @3 +// 4: right.a (hidden) @4 +// 5: right.b (hidden) @5 +// 6: right.y @6 // // If the join was a FULL OUTER JOIN, the columns would be: -// 1: a IFNULL(@1,@4) -// 2: b IFNULL(@2,@5) -// 3: left.a (hidden) @1 -// 4: left.b (hidden) @2 -// 5: left.x @3 -// 6: right.a (hidden) @4 -// 7: right.b (hidden) @5 -// 8: right.y @6 // +// 1: a IFNULL(@1,@4) +// 2: b IFNULL(@2,@5) +// 3: left.a (hidden) @1 +// 4: left.b (hidden) @2 +// 5: left.x @3 +// 6: right.a (hidden) @4 +// 7: right.b (hidden) @5 +// 8: right.y @6 type usingJoinBuilder struct { b *Builder joinType descpb.JoinType diff --git a/pkg/sql/opt/optbuilder/limit.go b/pkg/sql/opt/optbuilder/limit.go index baa967ad94f1..360234171ce1 100644 --- a/pkg/sql/opt/optbuilder/limit.go +++ b/pkg/sql/opt/optbuilder/limit.go @@ -19,7 +19,9 @@ import ( // // parentScope is the scope for the LIMIT/OFFSET expressions; this is not the // same as inScope, because statements like: -// SELECT k FROM kv LIMIT k +// +// SELECT k FROM kv LIMIT k +// // are not valid. func (b *Builder) buildLimit(limit *tree.Limit, parentScope, inScope *scope) { if limit.Offset != nil { diff --git a/pkg/sql/opt/optbuilder/locking.go b/pkg/sql/opt/optbuilder/locking.go index a116915ae662..8c56336a03b7 100644 --- a/pkg/sql/opt/optbuilder/locking.go +++ b/pkg/sql/opt/optbuilder/locking.go @@ -31,10 +31,10 @@ import ( // access the same row. In order from weakest to strongest, the lock strength // variants are: // -// FOR KEY SHARE -// FOR SHARE -// FOR NO KEY UPDATE -// FOR UPDATE +// FOR KEY SHARE +// FOR SHARE +// FOR NO KEY UPDATE +// FOR UPDATE // // The second property is the locking wait policy (see tree.LockingWaitPolicy). // A locking wait policy represents the policy a table scan uses to interact @@ -45,8 +45,8 @@ import ( // to handling locks held by other transactions. These non-standard policies // are: // -// SKIP LOCKED -// NOWAIT +// SKIP LOCKED +// NOWAIT // // In addition to these two properties, locking clauses can contain an optional // list of target relations. When provided, the locking clause applies only to @@ -55,12 +55,11 @@ import ( // // Put together, a complex locking spec might look like: // -// SELECT ... FROM ... FOR SHARE NOWAIT FOR UPDATE OF t1, t2 +// SELECT ... FROM ... 
FOR SHARE NOWAIT FOR UPDATE OF t1, t2 // // which would be represented as: // -// [ {ForShare, LockWaitError, []}, {ForUpdate, LockWaitBlock, [t1, t2]} ] -// +// [ {ForShare, LockWaitError, []}, {ForUpdate, LockWaitBlock, [t1, t2]} ] type lockingSpec []*tree.LockingItem // noRowLocking indicates that no row-level locking has been specified. diff --git a/pkg/sql/opt/optbuilder/mutation_builder.go b/pkg/sql/opt/optbuilder/mutation_builder.go index 3f7ea93abbdb..df9312a9a341 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder.go +++ b/pkg/sql/opt/optbuilder/mutation_builder.go @@ -242,11 +242,11 @@ func (mb *mutationBuilder) setFetchColIDs(cols []scopeColumn) { // buildInputForUpdate constructs a Select expression from the fields in // the Update operator, similar to this: // -// SELECT -// FROM -// WHERE -// ORDER BY -// LIMIT +// SELECT +// FROM
+// WHERE +// ORDER BY +// LIMIT // // All columns from the table to update are added to fetchColList. // If a FROM clause is defined, we build out each of the table @@ -375,11 +375,11 @@ func (mb *mutationBuilder) buildInputForUpdate( // buildInputForDelete constructs a Select expression from the fields in // the Delete operator, similar to this: // -// SELECT -// FROM
-// WHERE -// ORDER BY -// LIMIT +// SELECT +// FROM
+// WHERE +// ORDER BY +// LIMIT // // All columns from the table to update are added to fetchColList. // TODO(andyk): Do needed column analysis to project fewer columns if possible. @@ -511,7 +511,7 @@ func (mb *mutationBuilder) extractValuesInput(inputRows *tree.Select) *tree.Valu // corresponding column. This is only possible when the input is a VALUES // clause. For example: // -// INSERT INTO t (a, b) (VALUES (1, DEFAULT), (DEFAULT, 2)) +// INSERT INTO t (a, b) (VALUES (1, DEFAULT), (DEFAULT, 2)) // // Here, the two DEFAULT specifiers are replaced by the default value expression // for the a and b columns, respectively. @@ -579,12 +579,12 @@ func (mb *mutationBuilder) replaceDefaultExprs(inRows *tree.Select) (outRows *tr // missing columns. // // Values are synthesized for columns based on checking these rules, in order: -// 1. If column has a default value specified for it, use that as its value. -// 2. If column is nullable, use NULL as its value. -// 3. If column is currently being added or dropped (i.e. a mutation column), -// use a default value (0 for INT column, "" for STRING column, etc). Note -// that the existing "fetched" value returned by the scan cannot be used, -// since it may not have been initialized yet by the backfiller. +// 1. If column has a default value specified for it, use that as its value. +// 2. If column is nullable, use NULL as its value. +// 3. If column is currently being added or dropped (i.e. a mutation column), +// use a default value (0 for INT column, "" for STRING column, etc). Note +// that the existing "fetched" value returned by the scan cannot be used, +// since it may not have been initialized yet by the backfiller. // // If includeOrdinary is false, then only WriteOnly columns are considered. // @@ -944,7 +944,7 @@ func (mb *mutationBuilder) makeMutationPrivate(needResults bool) *memo.MutationP // might mutate the column, or it might be returned by the mutation statement, // or it might not be used at all. Columns take priority in this order: // -// upsert, update, fetch, insert +// upsert, update, fetch, insert // // If an upsert column is available, then it already combines an update/fetch // value with an insert value, so it takes priority. If an update column is diff --git a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go index caf419eb361c..ec4ca33835b5 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go @@ -43,16 +43,16 @@ import ( // // An arbiter index: // -// 1. Must have lax key columns that match the columns in the ON CONFLICT -// clause. -// 2. If it is a partial index, its predicate must be implied by the -// arbiter predicate supplied by the user. +// 1. Must have lax key columns that match the columns in the ON CONFLICT +// clause. +// 2. If it is a partial index, its predicate must be implied by the +// arbiter predicate supplied by the user. // // An arbiter constraint: // -// 1. Must have columns that match the columns in the ON CONFLICT clause. -// 2. If it is a partial constraint, its predicate must be implied by the -// arbiter predicate supplied by the user. +// 1. Must have columns that match the columns in the ON CONFLICT clause. +// 2. If it is a partial constraint, its predicate must be implied by the +// arbiter predicate supplied by the user. 
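As a sketch of criterion 2 for arbiter indexes (hypothetical table, not part of the patch): a partial unique index can only act as the arbiter when the user-supplied conflict predicate implies the index predicate.

    CREATE TABLE t (a INT, b INT, UNIQUE INDEX a_idx (a) WHERE b > 0);
    -- b > 1 implies b > 0, so a_idx qualifies as the arbiter for this INSERT.
    INSERT INTO t VALUES (1, 5)
    ON CONFLICT (a) WHERE b > 1
    DO NOTHING;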
func (mb *mutationBuilder) findArbiters(onConflict *tree.OnConflict) arbiterSet { if onConflict == nil { // No on conflict constraint means that we're in the UPSERT case, which should @@ -146,11 +146,10 @@ func partialIndexArbiterError(onConflict *tree.OnConflict, tableName tree.Name) // constraints are returned so that uniqueness is guaranteed on the respective // subsets of rows. In summary, if conflictOrds is non-empty, this function: // -// 1. Returns a single non-partial or pseudo-partial arbiter index, if found. -// 2. Return a single non-partial or pseudo-partial arbiter constraint, if -// found. -// 3. Otherwise, returns all partial arbiter indexes and constraints. -// +// 1. Returns a single non-partial or pseudo-partial arbiter index, if found. +// 2. Return a single non-partial or pseudo-partial arbiter constraint, if +// found. +// 3. Otherwise, returns all partial arbiter indexes and constraints. func (mb *mutationBuilder) inferArbitersFromConflictOrds( conflictOrds util.FastIntSet, arbiterPredicate tree.Expr, ) arbiterSet { @@ -273,11 +272,10 @@ func (mb *mutationBuilder) inferArbitersFromConflictOrds( // anti-join wraps the current mb.outScope.expr (which produces the insert rows) // and removes rows that would conflict with existing rows. // -// - conflictOrds is the set of table column ordinals that the arbiter +// - conflictOrds is the set of table column ordinals that the arbiter // guarantees uniqueness of. -// - pred is the partial index or constraint predicate. If the arbiter is +// - pred is the partial index or constraint predicate. If the arbiter is // not a partial index or constraint, pred is nil. -// func (mb *mutationBuilder) buildAntiJoinForDoNothingArbiter( inScope *scope, conflictOrds util.FastIntSet, pred tree.Expr, ) { @@ -360,7 +358,6 @@ func (mb *mutationBuilder) buildAntiJoinForDoNothingArbiter( // - partialIndexDistinctCol is a column that allows the UpsertDistinctOn to // only de-duplicate insert rows that satisfy the partial index predicate. // If the arbiter is not a partial index, partialIndexDistinctCol is nil. -// func (mb *mutationBuilder) buildLeftJoinForUpsertArbiter( inScope *scope, conflictOrds util.FastIntSet, pred tree.Expr, ) { @@ -451,7 +448,6 @@ func (mb *mutationBuilder) buildLeftJoinForUpsertArbiter( // constraint, partialArbiterDistinctCol is nil. // - errorOnDup indicates whether multiple rows in the same distinct group // should trigger an error. If empty, no error is triggered. -// func (mb *mutationBuilder) buildDistinctOnForArbiter( insertColScope *scope, conflictOrds util.FastIntSet, @@ -490,8 +486,8 @@ func (mb *mutationBuilder) buildDistinctOnForArbiter( // partial index or unique constraint predicate should be de-duplicated. For // example: // -// CREATE TABLE t (a INT, b INT, UNIQUE INDEX (a) WHERE b > 0) -// INSERT INTO t VALUES (1, 1), (1, 2), (1, -1), (1, -10) ON CONFLICT DO NOTHING +// CREATE TABLE t (a INT, b INT, UNIQUE INDEX (a) WHERE b > 0) +// INSERT INTO t VALUES (1, 1), (1, 2), (1, -1), (1, -10) ON CONFLICT DO NOTHING // // The rows (1, 1), (1, -1), and (1, -10) should be inserted. (1, -1) // and (1, -10) should not be removed from the input set. Even though @@ -504,10 +500,10 @@ func (mb *mutationBuilder) buildDistinctOnForArbiter( // and NULL otherwise. For the example above, the projected column would // be (b > 0) OR NULL. 
The values of the projected rows would be: // -// (1, 1) -> (1, 1, true) -// (1, 2) -> (1, 2, true) -// (1, -1) -> (1, -1, NULL) -// (1, -10) -> (1, -10, NULL) +// (1, 1) -> (1, 1, true) +// (1, 2) -> (1, 2, true) +// (1, -1) -> (1, -1, NULL) +// (1, -10) -> (1, -10, NULL) // // The set of conflict columns to be used for de-duplication includes a and the // newly projected column. The UpsertDistinctOn considers NULL values as unique, diff --git a/pkg/sql/opt/optbuilder/mutation_builder_fk.go b/pkg/sql/opt/optbuilder/mutation_builder_fk.go index a09d37c7dede..98b4cae83a61 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_fk.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_fk.go @@ -48,21 +48,21 @@ import ( // being a WithScan of the mutation input and the right side being the // referenced table. A simple example of an insert with a FK check: // -// insert child -// ├── ... -// ├── input binding: &1 -// └── f-k-checks -// └── f-k-checks-item: child(p) -> parent(p) -// └── anti-join (hash) -// ├── columns: column2:5!null -// ├── with-scan &1 -// │ ├── columns: column2:5!null -// │ └── mapping: -// │ └── column2:4 => column2:5 -// ├── scan parent -// │ └── columns: parent.p:6!null -// └── filters -// └── column2:5 = parent.p:6 +// insert child +// ├── ... +// ├── input binding: &1 +// └── f-k-checks +// └── f-k-checks-item: child(p) -> parent(p) +// └── anti-join (hash) +// ├── columns: column2:5!null +// ├── with-scan &1 +// │ ├── columns: column2:5!null +// │ └── mapping: +// │ └── column2:4 => column2:5 +// ├── scan parent +// │ └── columns: parent.p:6!null +// └── filters +// └── column2:5 = parent.p:6 // // See testdata/fk-checks-insert for more examples. func (mb *mutationBuilder) buildFKChecksForInsert() { @@ -90,21 +90,22 @@ func (mb *mutationBuilder) buildFKChecksForInsert() { // In the case of delete, each FK check query is a semi-join with the left side // being a WithScan of the mutation input and the right side being the // referencing table. For example: -// delete parent -// ├── ... -// ├── input binding: &1 -// └── f-k-checks -// └── f-k-checks-item: child(p) -> parent(p) -// └── semi-join (hash) -// ├── columns: p:7!null -// ├── with-scan &1 -// │ ├── columns: p:7!null -// │ └── mapping: -// │ └── parent.p:5 => p:7 -// ├── scan child -// │ └── columns: child.p:9!null -// └── filters -// └── p:7 = child.p:9 +// +// delete parent +// ├── ... +// ├── input binding: &1 +// └── f-k-checks +// └── f-k-checks-item: child(p) -> parent(p) +// └── semi-join (hash) +// ├── columns: p:7!null +// ├── with-scan &1 +// │ ├── columns: p:7!null +// │ └── mapping: +// │ └── parent.p:5 => p:7 +// ├── scan child +// │ └── columns: child.p:9!null +// └── filters +// └── p:7 = child.p:9 // // See testdata/fk-checks-delete for more examples. // @@ -112,7 +113,6 @@ func (mb *mutationBuilder) buildFKChecksForInsert() { // // See onDeleteCascadeBuilder, onDeleteFastCascadeBuilder, onDeleteSetBuilder // for details. -// func (mb *mutationBuilder) buildFKChecksAndCascadesForDelete() { if mb.tab.InboundForeignKeyCount() == 0 { // No relevant FKs. @@ -177,56 +177,55 @@ func (mb *mutationBuilder) buildFKChecksAndCascadesForDelete() { // // In the case of update, there are two types of FK check queries: // -// - insertion-side checks are very similar to the checks we issue for insert; -// they are an anti-join with the left side being a WithScan of the "new" -// values for each row. For example: -// update child -// ├── ... 
-// ├── input binding: &1 -// └── f-k-checks -// └── f-k-checks-item: child(p) -> parent(p) -// └── anti-join (hash) -// ├── columns: column5:6!null -// ├── with-scan &1 -// │ ├── columns: column5:6!null -// │ └── mapping: -// │ └── column5:5 => column5:6 -// ├── scan parent -// │ └── columns: parent.p:8!null -// └── filters -// └── column5:6 = parent.p:8 +// - insertion-side checks are very similar to the checks we issue for insert; +// they are an anti-join with the left side being a WithScan of the "new" +// values for each row. For example: +// update child +// ├── ... +// ├── input binding: &1 +// └── f-k-checks +// └── f-k-checks-item: child(p) -> parent(p) +// └── anti-join (hash) +// ├── columns: column5:6!null +// ├── with-scan &1 +// │ ├── columns: column5:6!null +// │ └── mapping: +// │ └── column5:5 => column5:6 +// ├── scan parent +// │ └── columns: parent.p:8!null +// └── filters +// └── column5:6 = parent.p:8 // -// - deletion-side checks are similar to the checks we issue for delete; they -// are a semi-join but the left side input is more complicated: it is an -// Except between a WithScan of the "old" values and a WithScan of the "new" -// values for each row (this is the set of values that are effectively -// removed from the table). For example: -// update parent -// ├── ... -// ├── input binding: &1 -// └── f-k-checks -// └── f-k-checks-item: child(p) -> parent(p) -// └── semi-join (hash) -// ├── columns: p:8!null -// ├── except -// │ ├── columns: p:8!null -// │ ├── left columns: p:8!null -// │ ├── right columns: column7:9 -// │ ├── with-scan &1 -// │ │ ├── columns: p:8!null -// │ │ └── mapping: -// │ │ └── parent.p:5 => p:8 -// │ └── with-scan &1 -// │ ├── columns: column7:9!null -// │ └── mapping: -// │ └── column7:7 => column7:9 -// ├── scan child -// │ └── columns: child.p:11!null -// └── filters -// └── p:8 = child.p:11 +// - deletion-side checks are similar to the checks we issue for delete; they +// are a semi-join but the left side input is more complicated: it is an +// Except between a WithScan of the "old" values and a WithScan of the "new" +// values for each row (this is the set of values that are effectively +// removed from the table). For example: +// update parent +// ├── ... +// ├── input binding: &1 +// └── f-k-checks +// └── f-k-checks-item: child(p) -> parent(p) +// └── semi-join (hash) +// ├── columns: p:8!null +// ├── except +// │ ├── columns: p:8!null +// │ ├── left columns: p:8!null +// │ ├── right columns: column7:9 +// │ ├── with-scan &1 +// │ │ ├── columns: p:8!null +// │ │ └── mapping: +// │ │ └── parent.p:5 => p:8 +// │ └── with-scan &1 +// │ ├── columns: column7:9!null +// │ └── mapping: +// │ └── column7:7 => column7:9 +// ├── scan child +// │ └── columns: child.p:11!null +// └── filters +// └── p:8 = child.p:11 // // Only FK relations that involve updated columns result in FK checks. 
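A sketch of the two kinds of checks under an assumed child(p) -> parent(p) schema (names chosen to match the plans above, not part of the patch):

    CREATE TABLE parent (p INT PRIMARY KEY);
    CREATE TABLE child (c INT PRIMARY KEY, p INT REFERENCES parent (p));
    -- Insertion-side check: the new child.p values must exist in parent.
    UPDATE child SET p = 3 WHERE c = 1;
    -- Deletion-side check: parent.p values removed by the update must not
    -- still be referenced by child.
    UPDATE parent SET p = p + 1 WHERE p = 5;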
-// func (mb *mutationBuilder) buildFKChecksForUpdate() { if mb.tab.OutboundForeignKeyCount() == 0 && mb.tab.InboundForeignKeyCount() == 0 { return @@ -360,14 +359,15 @@ func (mb *mutationBuilder) buildFKChecksForUpdate() { // The main difference is that for update, the "new" values were readily // available, whereas for upsert, the "new" values can be the result of an // expression of the form: -// CASE WHEN canary IS NULL THEN inserter-value ELSE updated-value END +// +// CASE WHEN canary IS NULL THEN inserter-value ELSE updated-value END +// // These expressions are already projected as part of the mutation input and are // directly accessible through WithScan. // // Only FK relations that involve updated columns result in deletion-side FK // checks. The insertion-side FK checks are always needed (similar to insert) // because any of the rows might result in an insert rather than an update. -// func (mb *mutationBuilder) buildFKChecksForUpsert() { numOutbound := mb.tab.OutboundForeignKeyCount() numInbound := mb.tab.InboundForeignKeyCount() diff --git a/pkg/sql/opt/optbuilder/orderby.go b/pkg/sql/opt/optbuilder/orderby.go index e4c70ccaaf38..9f6e1c016976 100644 --- a/pkg/sql/opt/optbuilder/orderby.go +++ b/pkg/sql/opt/optbuilder/orderby.go @@ -57,7 +57,9 @@ func (b *Builder) analyzeOrderBy( // Since the ordering property can only refer to output columns, we may need // to add a projection for the ordering columns. For example, consider the // following query: -// SELECT a FROM t ORDER BY c +// +// SELECT a FROM t ORDER BY c +// // The `c` column must be retained in the projection (and the presentation // property then omits it). // diff --git a/pkg/sql/opt/optbuilder/project.go b/pkg/sql/opt/optbuilder/project.go index 6324396aefba..fa2b4944e749 100644 --- a/pkg/sql/opt/optbuilder/project.go +++ b/pkg/sql/opt/optbuilder/project.go @@ -185,7 +185,7 @@ func (b *Builder) buildProjectionList(inScope *scope, projectionsScope *scope) { // resolveColRef looks for the common case of a standalone column reference // expression, like this: // -// SELECT ..., c, ... FROM ... +// SELECT ..., c, ... FROM ... // // It resolves the column name to a scopeColumn and returns it as a TypedExpr. func (b *Builder) resolveColRef(e tree.Expr, inScope *scope) tree.TypedExpr { @@ -227,13 +227,18 @@ func (b *Builder) getColName(expr tree.SelectExpr) string { // the expression as its value. // // texpr The given scalar expression. The expression is any scalar -// expression except for a bare variable or aggregate (those are -// handled separately in buildVariableProjection and -// buildFunction). +// +// expression except for a bare variable or aggregate (those are +// handled separately in buildVariableProjection and +// buildFunction). +// // scalar The memo expression that has already been built for the given -// typed expression. +// +// typed expression. +// // outCol The output column of the scalar which is being built. It can be -// nil if outScope is nil. +// +// nil if outScope is nil. // // See Builder.buildStmt for a description of the remaining input and return // values. @@ -269,9 +274,12 @@ func (b *Builder) finishBuildScalar( // // col Column containing the scalar expression that's been referenced. // outCol The output column which is being built. It can be nil if outScope is -// nil. +// +// nil. +// // colRefs The set of columns referenced so far by the scalar expression being -// built. If not nil, it is updated with the ID of this column. +// +// built. 
If not nil, it is updated with the ID of this column. // // See Builder.buildStmt for a description of the remaining input and return // values. @@ -324,10 +332,10 @@ func (b *Builder) finishBuildScalarRef( // // Sample usage: // -// pb := makeProjectionBuilder(b, scope) -// b.Add(name, expr, typ) -// ... -// scope = pb.Finish() +// pb := makeProjectionBuilder(b, scope) +// b.Add(name, expr, typ) +// ... +// scope = pb.Finish() // // Note that this is all a cheap no-op if Add is not called. type projectionBuilder struct { diff --git a/pkg/sql/opt/optbuilder/scalar.go b/pkg/sql/opt/optbuilder/scalar.go index 91e2e37c8ab7..8b902e89ee2d 100644 --- a/pkg/sql/opt/optbuilder/scalar.go +++ b/pkg/sql/opt/optbuilder/scalar.go @@ -516,8 +516,9 @@ func (b *Builder) buildAnyScalar( // f The given function expression. // outCol The output column of the function being built. // colRefs The set of columns referenced so far by the scalar expression -// being built. If not nil, it is updated with any columns seen in -// finishBuildScalarRef. +// +// being built. If not nil, it is updated with any columns seen in +// finishBuildScalarRef. // // See Builder.buildStmt for a description of the remaining input and // return values. @@ -721,12 +722,12 @@ func (b *Builder) buildRangeCond( // checkSubqueryOuterCols uses the subquery outer columns to update the given // set of column references and the set of outer columns for any enclosing // subuqery. It also performs the following checks: -// 1. If aggregates are not allowed in the current context (e.g., if we -// are building the WHERE clause), it checks that the subquery does not -// reference any aggregates from this scope. -// 2. If this is a grouping context, it checks that any outer columns from -// the given subquery that reference inScope are either aggregate or -// grouping columns in inScope. +// 1. If aggregates are not allowed in the current context (e.g., if we +// are building the WHERE clause), it checks that the subquery does not +// reference any aggregates from this scope. +// 2. If this is a grouping context, it checks that any outer columns from +// the given subquery that reference inScope are either aggregate or +// grouping columns in inScope. func (b *Builder) checkSubqueryOuterCols( subqueryOuterCols opt.ColSet, inGroupingContext bool, inScope *scope, colRefs *opt.ColSet, ) { diff --git a/pkg/sql/opt/optbuilder/scope.go b/pkg/sql/opt/optbuilder/scope.go index 3a98d0723af4..f6486bb3124d 100644 --- a/pkg/sql/opt/optbuilder/scope.go +++ b/pkg/sql/opt/optbuilder/scope.go @@ -654,7 +654,9 @@ func (s *scope) findFuncArgCol(idx tree.PlaceholderIdx) *scopeColumn { // startAggFunc is called when the builder starts building an aggregate // function. It is used to disallow nested aggregates and ensure that a // grouping error is not called on the aggregate arguments. For example: -// SELECT max(v) FROM kv GROUP BY k +// +// SELECT max(v) FROM kv GROUP BY k +// // should not throw an error, even though v is not a grouping column. // Non-grouping columns are allowed inside aggregate functions. // diff --git a/pkg/sql/opt/optbuilder/scope_column.go b/pkg/sql/opt/optbuilder/scope_column.go index 60aa117af684..3f35b6f2e357 100644 --- a/pkg/sql/opt/optbuilder/scope_column.go +++ b/pkg/sql/opt/optbuilder/scope_column.go @@ -237,20 +237,19 @@ func (*scopeColumn) Variable() {} // optbuilder builds expressions, but added to the metadata and displayed in opt // trees with another. This is useful for: // -// 1. 
Creating more descriptive metadata names, while having refNames that are -// required for column resolution while building expressions. This is -// particularly useful in mutations where there are multiple versions of -// target table columns for fetching, inserting, and updating that must be -// referenced by the same name. -// -// 2. Creating descriptive metadata names for anonymous columns that -// cannot be referenced. This is useful for columns like synthesized -// check constraint columns and partial index columns which cannot be -// referenced by other expressions. Prior to the creation of -// scopeColumnName, the same descriptive name added to the metadata -// could be referenced, making optbuilder vulnerable to "ambiguous -// column" bugs when a user table had a column with the same name. +// 1. Creating more descriptive metadata names, while having refNames that are +// required for column resolution while building expressions. This is +// particularly useful in mutations where there are multiple versions of +// target table columns for fetching, inserting, and updating that must be +// referenced by the same name. // +// 2. Creating descriptive metadata names for anonymous columns that +// cannot be referenced. This is useful for columns like synthesized +// check constraint columns and partial index columns which cannot be +// referenced by other expressions. Prior to the creation of +// scopeColumnName, the same descriptive name added to the metadata +// could be referenced, making optbuilder vulnerable to "ambiguous +// column" bugs when a user table had a column with the same name. type scopeColumnName struct { // refName is the name used when resolving columns while building an // expression. If it is empty, the column is anonymous and cannot be diff --git a/pkg/sql/opt/optbuilder/select.go b/pkg/sql/opt/optbuilder/select.go index a4866a6dd958..ad1ec2325e55 100644 --- a/pkg/sql/opt/optbuilder/select.go +++ b/pkg/sql/opt/optbuilder/select.go @@ -458,10 +458,11 @@ func errorOnInvalidMultiregionDB(evalCtx *eval.Context, tabMeta *opt.TableMeta) // DELETE). // // NOTE: Callers must take care that mutation columns (columns that are being -// added or dropped from the table) are only used when performing mutation -// DML statements (INSERT, UPDATE, UPSERT, DELETE). They cannot be used in -// any other way because they may not have been initialized yet by the -// backfiller! +// +// added or dropped from the table) are only used when performing mutation +// DML statements (INSERT, UPDATE, UPSERT, DELETE). They cannot be used in +// any other way because they may not have been initialized yet by the +// backfiller! // // See Builder.buildStmt for a description of the remaining input and return // values. @@ -1204,11 +1205,11 @@ func (b *Builder) buildFromTables( // in the reverse order that they appear in the list, with the innermost join // involving the tables at the end of the list. For example: // -// SELECT * FROM a,b,c +// SELECT * FROM a,b,c // // is joined like: // -// SELECT * FROM a JOIN (b JOIN c ON true) ON true +// SELECT * FROM a JOIN (b JOIN c ON true) ON true // // This ordering is guaranteed for queries not involving lateral joins for the // time being, to ensure we don't break any queries which have been @@ -1262,10 +1263,10 @@ func (b *Builder) exprIsLateral(t tree.TableExpr) bool { // left-to-right) rather than right-deep (from right-to-left) which we do // typically for perf backwards-compatibility. 
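As an aside on the right-deep vs. left-deep nesting contrasted here (the diff's own a/b/c illustration continues just below), the difference is easy to see with a standalone Go sketch that simply folds a list of hypothetical table names into SQL text in the two orders; this is illustrative string-building only, not the optbuilder API:

    package main

    import "fmt"

    // rightDeep folds the table list from the end inward:
    // a JOIN (b JOIN (c) ON true) ON true.
    func rightDeep(tables []string) string {
        expr := tables[len(tables)-1]
        for i := len(tables) - 2; i >= 0; i-- {
            expr = fmt.Sprintf("%s JOIN (%s) ON true", tables[i], expr)
        }
        return expr
    }

    // leftDeep folds the table list from the start outward:
    // ((a) JOIN b ON true) JOIN c ON true.
    func leftDeep(tables []string) string {
        expr := tables[0]
        for _, t := range tables[1:] {
            expr = fmt.Sprintf("(%s) JOIN %s ON true", expr, t)
        }
        return expr
    }

    func main() {
        tables := []string{"a", "b", "c"}
        fmt.Println(rightDeep(tables)) // a JOIN (b JOIN (c) ON true) ON true
        fmt.Println(leftDeep(tables))  // ((a) JOIN b ON true) JOIN c ON true
    }
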
// -// SELECT * FROM a, b, c +// SELECT * FROM a, b, c // -// buildFromTablesRightDeep: a JOIN (b JOIN c) -// buildFromWithLateral: (a JOIN b) JOIN c +// buildFromTablesRightDeep: a JOIN (b JOIN c) +// buildFromWithLateral: (a JOIN b) JOIN c func (b *Builder) buildFromWithLateral( tables tree.TableExprs, locking lockingSpec, inScope *scope, ) (outScope *scope) { diff --git a/pkg/sql/opt/optbuilder/srfs.go b/pkg/sql/opt/optbuilder/srfs.go index ebdf79d6c509..87c211c8a169 100644 --- a/pkg/sql/opt/optbuilder/srfs.go +++ b/pkg/sql/opt/optbuilder/srfs.go @@ -72,8 +72,7 @@ var _ tree.TypedExpr = &srf{} // returns tuples of values from a,b,c picked "simultaneously". NULLs // are used when an iterator is "shorter" than another. For example: // -// zip([1,2,3], ['a','b']) = [(1,'a'), (2,'b'), (3, null)] -// +// zip([1,2,3], ['a','b']) = [(1,'a'), (2,'b'), (3, null)] func (b *Builder) buildZip(exprs tree.Exprs, inScope *scope) (outScope *scope) { outScope = inScope.push() @@ -205,7 +204,7 @@ func (b *Builder) finishBuildGeneratorFunction( // ProjectSet is necessary in case some of the SRFs depend on the input. // For example, consider this query: // -// SELECT generate_series(t.a, t.a + 1) FROM t +// SELECT generate_series(t.a, t.a + 1) FROM t // // In this case, the inputs to generate_series depend on table t, so during // execution, generate_series will be called once for each row of t. diff --git a/pkg/sql/opt/optbuilder/subquery.go b/pkg/sql/opt/optbuilder/subquery.go index 2f515026c68d..53e1fa970581 100644 --- a/pkg/sql/opt/optbuilder/subquery.go +++ b/pkg/sql/opt/optbuilder/subquery.go @@ -331,18 +331,17 @@ func (b *Builder) buildSingleRowSubquery( // // We use the following transformations: // -// IN () -// ==> ConstructAny(, , EqOp) +// IN () +// ==> ConstructAny(, , EqOp) // -// NOT IN () -// ==> ConstructNot(ConstructAny(, , EqOp)) +// NOT IN () +// ==> ConstructNot(ConstructAny(, , EqOp)) // -// {SOME|ANY}() -// ==> ConstructAny(, , ) -// -// ALL() -// ==> ConstructNot(ConstructAny(, , Negate())) +// {SOME|ANY}() +// ==> ConstructAny(, , ) // +// ALL() +// ==> ConstructNot(ConstructAny(, , Negate())) func (b *Builder) buildMultiRowSubquery( c *tree.ComparisonExpr, inScope *scope, colRefs *opt.ColSet, ) (out opt.ScalarExpr, outScope *scope) { diff --git a/pkg/sql/opt/optbuilder/union.go b/pkg/sql/opt/optbuilder/union.go index f531d85acb87..64dd7af42b0f 100644 --- a/pkg/sql/opt/optbuilder/union.go +++ b/pkg/sql/opt/optbuilder/union.go @@ -116,7 +116,6 @@ func (b *Builder) buildSetOp( // Throws an error if the scopes don't have the same number of columns, or when // column types don't match 1-1 or can't be cast to a single output type. The // error messages use clauseTag. -// func (b *Builder) typeCheckSetOp( leftScope, rightScope *scope, clauseTag string, ) (setOpTypes []*types.T, leftCastsNeeded, rightCastsNeeded bool) { diff --git a/pkg/sql/opt/optbuilder/update.go b/pkg/sql/opt/optbuilder/update.go index 5386fa8f2cb8..3ec93d7551f7 100644 --- a/pkg/sql/opt/optbuilder/update.go +++ b/pkg/sql/opt/optbuilder/update.go @@ -30,30 +30,30 @@ import ( // provide updated values are projected for each of the SET expressions, as well // as for any computed columns. 
For example: // -// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) -// UPDATE abc SET b=1 WHERE a=2 +// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) +// UPDATE abc SET b=1 WHERE a=2 // // This would create an input expression similar to this SQL: // -// SELECT a AS oa, b AS ob, c AS oc, 1 AS nb FROM abc WHERE a=2 +// SELECT a AS oa, b AS ob, c AS oc, 1 AS nb FROM abc WHERE a=2 // // The execution engine evaluates this relational expression and uses the // resulting values to form the KV keys and values. // // Tuple SET expressions are decomposed into individual columns: // -// UPDATE abc SET (b, c)=(1, 2) WHERE a=3 -// => -// SELECT a AS oa, b AS ob, c AS oc, 1 AS nb, 2 AS nc FROM abc WHERE a=3 +// UPDATE abc SET (b, c)=(1, 2) WHERE a=3 +// => +// SELECT a AS oa, b AS ob, c AS oc, 1 AS nb, 2 AS nc FROM abc WHERE a=3 // // Subqueries become correlated left outer joins: // -// UPDATE abc SET b=(SELECT y FROM xyz WHERE x=a) -// => -// SELECT a AS oa, b AS ob, c AS oc, y AS nb -// FROM abc -// LEFT JOIN LATERAL (SELECT y FROM xyz WHERE x=a) -// ON True +// UPDATE abc SET b=(SELECT y FROM xyz WHERE x=a) +// => +// SELECT a AS oa, b AS ob, c AS oc, y AS nb +// FROM abc +// LEFT JOIN LATERAL (SELECT y FROM xyz WHERE x=a) +// ON True // // Computed columns result in an additional wrapper projection that can depend // on input columns. @@ -165,17 +165,17 @@ func (mb *mutationBuilder) addTargetColsForUpdate(exprs tree.UpdateExprs) { // addUpdateCols builds nested Project and LeftOuterJoin expressions that // correspond to the given SET expressions: // -// SET a=1 (single-column SET) -// Add as synthesized Project column: -// SELECT , 1 FROM +// SET a=1 (single-column SET) +// Add as synthesized Project column: +// SELECT , 1 FROM // -// SET (a, b)=(1, 2) (tuple SET) -// Add as multiple Project columns: -// SELECT , 1, 2 FROM +// SET (a, b)=(1, 2) (tuple SET) +// Add as multiple Project columns: +// SELECT , 1, 2 FROM // -// SET (a, b)=(SELECT 1, 2) (subquery) -// Wrap input in Max1Row + LeftJoinApply expressions: -// SELECT * FROM LEFT JOIN LATERAL (SELECT 1, 2) ON True +// SET (a, b)=(SELECT 1, 2) (subquery) +// Wrap input in Max1Row + LeftJoinApply expressions: +// SELECT * FROM LEFT JOIN LATERAL (SELECT 1, 2) ON True // // Multiple subqueries result in multiple left joins successively wrapping the // input. A final Project operator is built if any single-column or tuple SET diff --git a/pkg/sql/opt/optbuilder/util.go b/pkg/sql/opt/optbuilder/util.go index 4d921a43ecb6..c210674d376c 100644 --- a/pkg/sql/opt/optbuilder/util.go +++ b/pkg/sql/opt/optbuilder/util.go @@ -191,9 +191,13 @@ func (b *Builder) expandStarAndResolveType( // a synthesized column "x_incr". // // scope The scope is passed in so it can can be updated with the newly bound -// variable. +// +// variable. +// // name This is the name for the new column (e.g., if specified with -// the AS keyword). +// +// the AS keyword). +// // typ The type of the column. // expr The expression this column refers to (if any). // scalar The scalar expression associated with this column (if any). @@ -225,15 +229,15 @@ func (b *Builder) populateSynthesizedColumn(col *scopeColumn, scalar opt.ScalarE // projectColumn projects src by copying its column ID to dst. projectColumn // also copies src.name to dst if an alias is not already set in dst. No other // fields are copied, for the following reasons: -// - We don't copy group, as dst becomes a pass-through column in the new -// scope. dst already has group=0, so keep it as-is. 
-// - We don't copy hidden, because projecting a column makes it visible. -// dst already has hidden=false, so keep it as-is. -// - We don't copy table, since the table becomes anonymous in the new scope. -// - We don't copy descending, since we don't want to overwrite dst.descending -// if dst is an ORDER BY column. -// - expr, exprStr and typ in dst already correspond to the expression and type -// of the src column. +// - We don't copy group, as dst becomes a pass-through column in the new +// scope. dst already has group=0, so keep it as-is. +// - We don't copy hidden, because projecting a column makes it visible. +// dst already has hidden=false, so keep it as-is. +// - We don't copy table, since the table becomes anonymous in the new scope. +// - We don't copy descending, since we don't want to overwrite dst.descending +// if dst is an ORDER BY column. +// - expr, exprStr and typ in dst already correspond to the expression and type +// of the src column. func (b *Builder) projectColumn(dst *scopeColumn, src *scopeColumn) { if dst.name.IsAnonymous() { dst.name = src.name @@ -279,7 +283,9 @@ func (b *Builder) synthesizeResultColumns(scope *scope, cols colinfo.ResultColum // colIndex takes an expression that refers to a column using an integer, // verifies it refers to a valid target in the SELECT list, and returns the // corresponding column index. For example: -// SELECT a from T ORDER by 1 +// +// SELECT a from T ORDER by 1 +// // Here "1" refers to the first item in the SELECT list, "a". The returned // index is 0. func colIndex(numOriginalCols int, expr tree.Expr, context string) int { diff --git a/pkg/sql/opt/optbuilder/window.go b/pkg/sql/opt/optbuilder/window.go index ab5b3a8c9147..6c34ab63afd6 100644 --- a/pkg/sql/opt/optbuilder/window.go +++ b/pkg/sql/opt/optbuilder/window.go @@ -228,18 +228,19 @@ func (b *Builder) buildWindow(outScope *scope, inScope *scope) { // To support this ordering, we build the aggregate as a window function like below: // // scalar-group-by -// ├── columns: array_agg:2(int[]) -// ├── window partition=() ordering=+1 -// │ ├── columns: col1:1(int!null) array_agg:2(int[]) -// │ ├── scan tab -// │ │ └── columns: col1:1(int!null) -// │ └── windows -// │ └── windows-item: range from unbounded to unbounded [type=int[]] -// │ └── array-agg [type=int[]] -// │ └── variable: col1 [type=int] -// └── aggregations -// └── const-agg [type=int[]] -// └── variable: array_agg [type=int[]] +// +// ├── columns: array_agg:2(int[]) +// ├── window partition=() ordering=+1 +// │ ├── columns: col1:1(int!null) array_agg:2(int[]) +// │ ├── scan tab +// │ │ └── columns: col1:1(int!null) +// │ └── windows +// │ └── windows-item: range from unbounded to unbounded [type=int[]] +// │ └── array-agg [type=int[]] +// │ └── variable: col1 [type=int] +// └── aggregations +// └── const-agg [type=int[]] +// └── variable: array_agg [type=int[]] func (b *Builder) buildAggregationAsWindow( groupingColSet opt.ColSet, having opt.ScalarExpr, fromScope *scope, ) *scope { diff --git a/pkg/sql/opt/optbuilder/with.go b/pkg/sql/opt/optbuilder/with.go index 48f1ef9864bd..7dda378cc0b1 100644 --- a/pkg/sql/opt/optbuilder/with.go +++ b/pkg/sql/opt/optbuilder/with.go @@ -388,7 +388,9 @@ func (b *Builder) getCTECols(cteScope *scope, name tree.AliasClause) physical.Pr } // splitRecursiveCTE splits a CTE statement of the form -// initial_query UNION [ALL] recursive_query +// +// initial_query UNION [ALL] recursive_query +// // into the initial and recursive parts. 
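For context on the initial/recursive split that splitRecursiveCTE performs (its ok=false caveat continues just below), the following self-contained Go sketch mimics the usual evaluation semantics of a recursive CTE such as WITH RECURSIVE t(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM t WHERE n < 5) SELECT n FROM t: the initial query seeds a working set, and the recursive query is re-applied to the previous iteration's rows until nothing new is produced. This illustrates the semantics only; it is not CockroachDB's execution code:

    package main

    import "fmt"

    func main() {
        // Initial query: SELECT 1 seeds the working set and the result.
        working := []int{1}
        result := append([]int(nil), working...)

        // Recursive query: SELECT n+1 FROM t WHERE n < 5, where t is bound to
        // the rows from the previous iteration. UNION ALL keeps duplicates, so
        // no dedup step is needed; iteration stops when nothing is produced.
        for len(working) > 0 {
            var next []int
            for _, n := range working {
                if n < 5 {
                    next = append(next, n+1)
                }
            }
            result = append(result, next...)
            working = next
        }

        fmt.Println(result) // [1 2 3 4 5]
    }
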
// If the statement is not of this form, returns ok=false. func (b *Builder) splitRecursiveCTE( diff --git a/pkg/sql/opt/optgen/cmd/langgen/exprs_gen.go b/pkg/sql/opt/optgen/cmd/langgen/exprs_gen.go index f0f36221a9ad..6e31761aed86 100644 --- a/pkg/sql/opt/optgen/cmd/langgen/exprs_gen.go +++ b/pkg/sql/opt/optgen/cmd/langgen/exprs_gen.go @@ -51,9 +51,9 @@ func (g *exprsGen) generate(compiled *lang.CompiledExpr, w io.Writer) { } } -// type SomeExpr struct { -// FieldName FieldType -// } +// type SomeExpr struct { +// FieldName FieldType +// } func (g *exprsGen) genExprType(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -79,9 +79,9 @@ func (g *exprsGen) genExprType(define *lang.DefineExpr) { } } -// func (e *SomeExpr) Op() Operator { -// return SomeOp -// } +// func (e *SomeExpr) Op() Operator { +// return SomeOp +// } func (g *exprsGen) genOpFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) opType := fmt.Sprintf("%sOp", define.Name) @@ -91,9 +91,9 @@ func (g *exprsGen) genOpFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) ChildCount() int { -// return 1 -// } +// func (e *SomeExpr) ChildCount() int { +// return 1 +// } func (g *exprsGen) genChildCountFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -109,13 +109,13 @@ func (g *exprsGen) genChildCountFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) Child(nth int) Expr { -// switch nth { -// case 0: -// return e.FieldName -// } -// panic(fmt.Sprintf("child index %d is out of range", nth)) -// } +// func (e *SomeExpr) Child(nth int) Expr { +// switch nth { +// case 0: +// return e.FieldName +// } +// panic(fmt.Sprintf("child index %d is out of range", nth)) +// } func (g *exprsGen) genChildFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -150,13 +150,13 @@ func (g *exprsGen) genChildFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) ChildName(nth int) string { -// switch nth { -// case 0: -// return "FieldName" -// } -// panic(fmt.Sprintf("child index %d is out of range", nth)) -// } +// func (e *SomeExpr) ChildName(nth int) string { +// switch nth { +// case 0: +// return "FieldName" +// } +// panic(fmt.Sprintf("child index %d is out of range", nth)) +// } func (g *exprsGen) genChildNameFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -176,9 +176,9 @@ func (g *exprsGen) genChildNameFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) Value() interface{} { -// return string(*e) -// } +// func (e *SomeExpr) Value() interface{} { +// return string(*e) +// } func (g *exprsGen) genValueFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -191,13 +191,13 @@ func (g *exprsGen) genValueFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) Visit(visit VisitFunc) Expr { -// children := visitChildren(e, visit) -// if children != nil { -// return &SomeExpr{FieldName: children[0].(*FieldType)} -// } -// return e -// } +// func (e *SomeExpr) Visit(visit VisitFunc) Expr { +// children := visitChildren(e, visit) +// if children != nil { +// return &SomeExpr{FieldName: children[0].(*FieldType)} +// } +// return e +// } func (g *exprsGen) genVisitFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -258,9 +258,9 @@ func (g *exprsGen) genVisitFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, 
"}\n\n") } -// func (e *SomeExpr) Source() *SourceLoc { -// return e.Src -// } +// func (e *SomeExpr) Source() *SourceLoc { +// return e.Src +// } func (g *exprsGen) genSourceFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -273,9 +273,9 @@ func (g *exprsGen) genSourceFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) InferredType() DataType { -// return e.Typ -// } +// func (e *SomeExpr) InferredType() DataType { +// return e.Typ +// } func (g *exprsGen) genInferredType(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -290,11 +290,11 @@ func (g *exprsGen) genInferredType(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) String() string { -// var buf bytes.Buffer -// e.Format(&buf, 0) -// return buf.String() -// } +// func (e *SomeExpr) String() string { +// var buf bytes.Buffer +// e.Format(&buf, 0) +// return buf.String() +// } func (g *exprsGen) genStringFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) @@ -305,9 +305,9 @@ func (g *exprsGen) genStringFunc(define *lang.DefineExpr) { fmt.Fprintf(g.w, "}\n\n") } -// func (e *SomeExpr) Format(buf *bytes.Buffer, level int) { -// formatExpr(e, buf, level) -// } +// func (e *SomeExpr) Format(buf *bytes.Buffer, level int) { +// formatExpr(e, buf, level) +// } func (g *exprsGen) genFormatFunc(define *lang.DefineExpr) { exprType := fmt.Sprintf("%sExpr", define.Name) diff --git a/pkg/sql/opt/optgen/cmd/optgen/explorer_gen.go b/pkg/sql/opt/optgen/cmd/optgen/explorer_gen.go index 4d19ff1ae91a..ef0f70d5589d 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/explorer_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/explorer_gen.go @@ -47,22 +47,21 @@ func (g *explorerGen) generate(compiled *lang.CompiledExpr, w io.Writer) { // for each define statement that has an explore rule defined. The code is // similar to this: // -// func (_e *explorer) exploreGroupMember( -// state *exploreState, -// member memo.RelExpr, -// ordinal int, -// ) (_fullyExplored bool) { -// switch t := member.(type) { -// case *memo.ScanNode: -// return _e.exploreScan(state, t, ordinal) -// case *memo.SelectNode: -// return _e.exploreSelect(state, t, ordinal) -// } -// -// // No rules for other operator types. -// return true -// } +// func (_e *explorer) exploreGroupMember( +// state *exploreState, +// member memo.RelExpr, +// ordinal int, +// ) (_fullyExplored bool) { +// switch t := member.(type) { +// case *memo.ScanNode: +// return _e.exploreScan(state, t, ordinal) +// case *memo.SelectNode: +// return _e.exploreSelect(state, t, ordinal) +// } // +// // No rules for other operator types. +// return true +// } func (g *explorerGen) genDispatcher() { g.w.nestIndent("func (_e *explorer) exploreGroupMember(\n") g.w.writeIndent("state *exploreState,\n") @@ -91,18 +90,17 @@ func (g *explorerGen) genDispatcher() { // genRuleFuncs generates a method for each operator that has at least one // explore rule defined. The code is similar to this: // -// func (_e *explorer) exploreScan( -// _rootState *exploreState, -// _root *memo.ScanNode, -// _rootOrd int, -// ) (_fullyExplored bool) { -// _fullyExplored = true -// -// ... exploration rule code goes here ... +// func (_e *explorer) exploreScan( +// _rootState *exploreState, +// _root *memo.ScanNode, +// _rootOrd int, +// ) (_fullyExplored bool) { +// _fullyExplored = true // -// return _fullyExplored -// } +// ... exploration rule code goes here ... 
// +// return _fullyExplored +// } func (g *explorerGen) genRuleFuncs() { for _, define := range g.compiled.Defines { rules := g.compiled.LookupMatchingRules(string(define.Name)).WithTag("Explore") diff --git a/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go b/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go index 1a593b3d369a..7cb485848109 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go @@ -88,13 +88,12 @@ func (g *exprsGen) genExprDef(define *lang.DefineExpr) { // genExprGroupDef generates the group struct definition for a relational // expression, plus its methods: // -// type selectGroup struct { -// mem *Memo -// rel props.Relational -// first SelectExpr -// best bestProps -// } -// +// type selectGroup struct { +// mem *Memo +// rel props.Relational +// first SelectExpr +// best bestProps +// } func (g *exprsGen) genExprGroupDef(define *lang.DefineExpr) { if !define.Tags.Contains("Relational") { return @@ -135,13 +134,12 @@ func (g *exprsGen) genExprGroupDef(define *lang.DefineExpr) { // genPrivateStruct generates the struct for a define tagged as Private: // -// type FunctionPrivate struct { -// Name string -// Typ *types.T -// Properties *tree.FunctionProperties -// Overload *tree.Overload -// } -// +// type FunctionPrivate struct { +// Name string +// Typ *types.T +// Properties *tree.FunctionProperties +// Overload *tree.Overload +// } func (g *exprsGen) genPrivateStruct(define *lang.DefineExpr) { privTyp := g.md.typeOf(define) @@ -167,14 +165,13 @@ func (g *exprsGen) genPrivateStruct(define *lang.DefineExpr) { // genExprStruct generates the struct type definition for an expression: // -// type SelectExpr struct { -// Input RelExpr -// Filters FiltersExpr -// -// grp exprGroup -// next RelExpr -// } +// type SelectExpr struct { +// Input RelExpr +// Filters FiltersExpr // +// grp exprGroup +// next RelExpr +// } func (g *exprsGen) genExprStruct(define *lang.DefineExpr) { opTyp := g.md.typeOf(define) diff --git a/pkg/sql/opt/optgen/cmd/optgen/factory_gen.go b/pkg/sql/opt/optgen/cmd/optgen/factory_gen.go index f11a5f672c1b..f798f8b8b1b3 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/factory_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/factory_gen.go @@ -53,18 +53,17 @@ func (g *factoryGen) generate(compiled *lang.CompiledExpr, w io.Writer) { // genConstructFuncs generates the factory Construct functions for each // expression type. The code is similar to this: // -// // ConstructSelect constructs an expression for the Select operator. -// func (_f *Factory) ConstructSelect( -// input memo.RelExpr, -// filters memo.FiltersExpr, -// ) memo.RelExpr { +// // ConstructSelect constructs an expression for the Select operator. +// func (_f *Factory) ConstructSelect( +// input memo.RelExpr, +// filters memo.FiltersExpr, +// ) memo.RelExpr { // -// ... normalization rule code goes here ... -// -// nd := _f.mem.MemoizeSelect(input, filters) -// return _f.onConstructRelational(nd) -// } +// ... normalization rule code goes here ... // +// nd := _f.mem.MemoizeSelect(input, filters) +// return _f.onConstructRelational(nd) +// } func (g *factoryGen) genConstructFuncs() { defines := g.compiled.Defines. WithoutTag("Enforcer"). @@ -473,18 +472,17 @@ func (g *factoryGen) genCopyAndReplaceDefault() { // constructs expressions from a dynamic type and arguments. 
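Before the generated DynamicConstruct body shown just below, it may help to see the dispatch shape in isolation: a switch over an operator enum whose ...interface{} arguments are type-asserted back to concrete types. The Operator, Expr, ConstExpr, and PlusExpr names in this sketch are toy stand-ins, not the memo package's real types:

    package main

    import "fmt"

    type Operator int

    const (
        ConstOp Operator = iota
        PlusOp
    )

    type Expr interface{ String() string }

    type ConstExpr struct{ Val int }
    type PlusExpr struct{ Left, Right Expr }

    func (c *ConstExpr) String() string { return fmt.Sprint(c.Val) }
    func (p *PlusExpr) String() string  { return fmt.Sprintf("(%s + %s)", p.Left, p.Right) }

    // DynamicConstruct builds an expression from an operator tag and untyped
    // arguments, mirroring the switch/type-assertion shape of the generated
    // factory method.
    func DynamicConstruct(op Operator, args ...interface{}) Expr {
        switch op {
        case ConstOp:
            return &ConstExpr{Val: args[0].(int)}
        case PlusOp:
            return &PlusExpr{Left: args[0].(Expr), Right: args[1].(Expr)}
        }
        panic(fmt.Sprintf("unhandled op %v", op))
    }

    func main() {
        one := DynamicConstruct(ConstOp, 1)
        two := DynamicConstruct(ConstOp, 2)
        fmt.Println(DynamicConstruct(PlusOp, one, two)) // (1 + 2)
    }
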
The code looks // similar to this: // -// func (f *Factory) DynamicConstruct(op opt.Operator, args ...interface{}) opt.Node { -// switch op { -// case opt.ProjectOp: -// return f.ConstructProject( -// args[0].(memo.RelNode), -// *args[1].(*memo.ProjectionsExpr), -// *args[2].(*opt.ColSet), -// ) -// -// ... cases for other ops ... -// } +// func (f *Factory) DynamicConstruct(op opt.Operator, args ...interface{}) opt.Node { +// switch op { +// case opt.ProjectOp: +// return f.ConstructProject( +// args[0].(memo.RelNode), +// *args[1].(*memo.ProjectionsExpr), +// *args[2].(*opt.ColSet), +// ) // +// ... cases for other ops ... +// } func (g *factoryGen) genDynamicConstruct() { g.w.nestIndent("func (f *Factory) DynamicConstruct(op opt.Operator, args ...interface{}) opt.Expr {\n") g.w.writeIndent("switch op {\n") diff --git a/pkg/sql/opt/optgen/cmd/optgen/metadata.go b/pkg/sql/opt/optgen/cmd/optgen/metadata.go index ad0bf4da1507..500e9736584d 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/metadata.go +++ b/pkg/sql/opt/optgen/cmd/optgen/metadata.go @@ -36,20 +36,20 @@ type metadata struct { // Depending on the context, values of a particular Optgen type are represented // as different Go types. Here are the three contexts: // -// field type: Go type used for a struct field having the Optgen type -// param type: Go type used when a value of the Optgen type is passed as a -// strongly-typed parameter (i.e. of same type as itself) -// dynamic param type: Go type used when a value of the Optgen type is passed -// as a dynamic type (i.e. interface{} or opt.Expr) +// field type: Go type used for a struct field having the Optgen type +// param type: Go type used when a value of the Optgen type is passed as a +// strongly-typed parameter (i.e. of same type as itself) +// dynamic param type: Go type used when a value of the Optgen type is passed +// as a dynamic type (i.e. interface{} or opt.Expr) // // Here are some examples: // -// Field Type Param Type Dynamic Param Type -// ScalarExpr opt.ScalarExpr opt.ScalarExpr opt.ScalarExpr -// FiltersExpr memo.FiltersExpr memo.FiltersExpr *memo.FiltersExpr -// ConstExpr *memo.ConstExpr *memo.ConstExpr *memo.ConstExpr -// ScanPrivate memo.ScanPrivate *memo.ScanPrivate *memo.ScanPrivate -// ColSet opt.ColSet opt.ColSet *opt.ColSet +// Field Type Param Type Dynamic Param Type +// ScalarExpr opt.ScalarExpr opt.ScalarExpr opt.ScalarExpr +// FiltersExpr memo.FiltersExpr memo.FiltersExpr *memo.FiltersExpr +// ConstExpr *memo.ConstExpr *memo.ConstExpr *memo.ConstExpr +// ScanPrivate memo.ScanPrivate *memo.ScanPrivate *memo.ScanPrivate +// ColSet opt.ColSet opt.ColSet *opt.ColSet // // The reason for these different representations is to avoid extra Go object // allocations. In particular, when a non-pointer field is cast directly to an @@ -131,8 +131,7 @@ type typeDef struct { // isListType is true if this type is represented as a Go slice. For example: // -// type FiltersExpr []FiltersItem -// +// type FiltersExpr []FiltersItem func (t *typeDef) isListType() bool { return t.listItemType != nil } @@ -140,12 +139,11 @@ func (t *typeDef) isListType() bool { // asField returns the Go type used when this Optgen type is used as a field in // a struct. 
For example: // -// type SomeExpr struct { -// expr opt.ScalarExpr -// filters FiltersExpr -// var *VariablExpr -// } -// +// type SomeExpr struct { +// expr opt.ScalarExpr +// filters FiltersExpr +// var *VariablExpr +// } func (t *typeDef) asField() string { // If the type is a pointer (but not an interface), then prefix with "*". if t.isPointer && !t.isInterface { @@ -157,9 +155,8 @@ func (t *typeDef) asField() string { // asParam returns the Go type used to pass this Optgen type around as a // parameter. For example: // -// func SomeFunc(expr opt.ScalarExpr, filters FiltersExpr) -// func SomeFunc(scanPrivate *ScanPrivate) -// +// func SomeFunc(expr opt.ScalarExpr, filters FiltersExpr) +// func SomeFunc(scanPrivate *ScanPrivate) func (t *typeDef) asParam() string { // Non-interface pointers and by-ref structs need to be prefixed with "*". if (t.isPointer && !t.isInterface) || !t.passByVal { @@ -172,10 +169,9 @@ func (t *typeDef) asParam() string { // The pkg parameter is used to correctly qualify type names. For example, if // pkg is "memo", then: // -// memo.RelExpr => RelExpr -// opt.ScalarExpr => opt.ScalarExpr -// memo.ScanPrivate => ScanPrivate -// +// memo.RelExpr => RelExpr +// opt.ScalarExpr => opt.ScalarExpr +// memo.ScanPrivate => ScanPrivate func newMetadata(compiled *lang.CompiledExpr, pkg string) *metadata { md := &metadata{ compiled: compiled, @@ -344,16 +340,16 @@ func (m *metadata) lookupType(friendlyName string) *typeDef { // particular, fields named "_" are mapped to the name of a Go embedded field, // which is equal to the field's type name: // -// define Scan { -// _ ScanPrivate -// } +// define Scan { +// _ ScanPrivate +// } // // gets compiled into: // -// type ScanExpr struct { -// ScanPrivate -// ... -// } +// type ScanExpr struct { +// ScanPrivate +// ... +// } // // Note that the field's type name is always a simple alphanumeric identifier // with no package specified (that's only specified in the fullName field of the @@ -370,12 +366,12 @@ func (m *metadata) fieldName(field *lang.DefineFieldExpr) string { // unexported fields are omitted from the result. For example, for the Project // operator: // -// define Project { -// Input RelExpr -// Projections ProjectionsExpr -// Passthrough ColSet -// internalFuncDeps FuncDepSet -// } +// define Project { +// Input RelExpr +// Projections ProjectionsExpr +// Passthrough ColSet +// internalFuncDeps FuncDepSet +// } // // The Input and Projections fields are children, but the Passthrough and // the internalFuncDeps fields will not be returned. @@ -395,11 +391,11 @@ func (m *metadata) childFields(define *lang.DefineExpr) lang.DefineFieldsExpr { // privateField returns the private field for an operator define expression, if // one exists. For example, for the Project operator: // -// define Project { -// Input RelExpr -// Projections ProjectionsExpr -// Passthrough ColSet -// } +// define Project { +// Input RelExpr +// Projections ProjectionsExpr +// Passthrough ColSet +// } // // The Passthrough field is the private field. If no private field exists for // the operator, then privateField returns nil. @@ -451,7 +447,7 @@ func (m *metadata) hasUnexportedFields(define *lang.DefineExpr) bool { // should be taken when loading that field from an instance in order to pass it // elsewhere as a parameter (like to a function). 
For example: // -// f.ConstructUnion(union.Left, union.Right, &union.SetPrivate) +// f.ConstructUnion(union.Left, union.Right, &union.SetPrivate) // // The Left and Right fields are passed by value, but the SetPrivate is passed // by reference. @@ -465,9 +461,9 @@ func fieldLoadPrefix(typ *typeDef) string { // fieldStorePrefix is the inverse of fieldLoadPrefix, used when a value being // used as a parameter is stored into a field: // -// union.Left = left -// union.Right = right -// union.SetPrivate = *setPrivate +// union.Left = left +// union.Right = right +// union.SetPrivate = *setPrivate // // Since SetPrivate values are passed by reference, they must be dereferenced // before copying them to a target field. @@ -482,11 +478,11 @@ func fieldStorePrefix(typ *typeDef) string { // type should be taken when loading that field from an instance in order to // pass it as a dynamic parameter (like interface{} or opt.Expr). For example: // -// f.ConstructDynamic( -// project.Input, -// &project.Projections, -// &project.Passthrough, -// ) +// f.ConstructDynamic( +// project.Input, +// &project.Projections, +// &project.Passthrough, +// ) // // Note that normally the Projections and Passthrough fields would be passed by // value, but here their addresses are passed in order to avoid Go allocating an @@ -506,11 +502,10 @@ func dynamicFieldLoadPrefix(typ *typeDef) string { // in order to avoid an extra allocation when passing as an interface. For // example: // -// var val interface{} -// project.Input = val.(RelExpr) -// project.Filers = *val.(*FiltersExpr) -// project.Projections = *val.(*opt.ColSet) -// +// var val interface{} +// project.Input = val.(RelExpr) +// project.Filers = *val.(*FiltersExpr) +// project.Projections = *val.(*opt.ColSet) func castFromDynamicParam(param string, typ *typeDef) string { if typ.isInterface { // Interfaces are passed as interfaces for both param and dynamic param diff --git a/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go b/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go index 21cfc08584db..216e62931f27 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go @@ -130,7 +130,8 @@ func sortDefines(defines lang.DefineSetExpr) lang.DefineSetExpr { // dashCase converts camel-case identifiers into "dash case", where uppercase // letters in the middle of the identifier are replaced by a dash followed // by the lowercase version the letter. Example: -// InnerJoinApply => inner-join-apply +// +// InnerJoinApply => inner-join-apply func dashCase(s string) string { var buf bytes.Buffer for i, ch := range s { @@ -150,7 +151,8 @@ func dashCase(s string) string { // syntaxCase converts camel-case identifiers into "syntax case", where // uppercase letters in the middle of the identifier are interpreted as new // words and separated by a space from the previous word. 
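The dashCase and syntaxCase helpers being reformatted here are plain rune walks (the syntaxCase Example line continues just below). A standalone sketch of both conversions, written against the standard library rather than copied from ops_gen.go, could look like:

    package main

    import (
        "fmt"
        "strings"
        "unicode"
    )

    // toDashCase turns "InnerJoinApply" into "inner-join-apply": every interior
    // uppercase letter is preceded by a dash, and all letters are lowercased.
    func toDashCase(s string) string {
        var b strings.Builder
        for i, ch := range s {
            if unicode.IsUpper(ch) && i != 0 {
                b.WriteByte('-')
            }
            b.WriteRune(unicode.ToLower(ch))
        }
        return b.String()
    }

    // toSyntaxCase turns "InnerJoinApply" into "INNER JOIN APPLY": interior
    // uppercase letters start a new space-separated word, and all letters are
    // uppercased.
    func toSyntaxCase(s string) string {
        var b strings.Builder
        for i, ch := range s {
            if unicode.IsUpper(ch) && i != 0 {
                b.WriteByte(' ')
            }
            b.WriteRune(unicode.ToUpper(ch))
        }
        return b.String()
    }

    func main() {
        fmt.Println(toDashCase("InnerJoinApply"))   // inner-join-apply
        fmt.Println(toSyntaxCase("InnerJoinApply")) // INNER JOIN APPLY
    }
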
Example: -// InnerJoinApply => INNER JOIN APPLY +// +// InnerJoinApply => INNER JOIN APPLY func syntaxCase(s string) string { var buf bytes.Buffer for i, ch := range s { diff --git a/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go b/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go index 926dd0126c73..c2870b58bec1 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go @@ -35,33 +35,33 @@ type contextDecl struct { // difference becomes even more pronounced when there are multiple nested // match expressions, such as in: // -// (InnerJoin -// (Select $input:* $filters:*) -// $right:* -// $on:* -// ) +// (InnerJoin +// (Select $input:* $filters:*) +// $right:* +// $on:* +// ) // // If the inner-join group has 3 expressions and the select group has 2 // expressions, then an exploration rule must consider 6 possible combinations. // It does this by generating a loop rather than an if statement (as in the // normalization case), similar to this: // -// var _member memo.RelExpr -// for _ord := 0; _ord < _state.end; _ord++ { -// if _member == nil { -// _member = _innerJoin.Left.FirstExpr() -// } else { -// _member = _member.NextExpr() -// } -// _select, _ := _member.(*SelectExpr) -// if _select != nil { +// var _member memo.RelExpr +// for _ord := 0; _ord < _state.end; _ord++ { +// if _member == nil { +// _member = _innerJoin.Left.FirstExpr() +// } else { +// _member = _member.NextExpr() +// } +// _select, _ := _member.(*SelectExpr) +// if _select != nil { // // If the join contained another match pattern, it would be another loop nested // within that loop. If this was a Normalize rule, then the code would look // like this instead: // -// _select := _innerJoin.Left.(*SelectExpr) -// if _select != nil { +// _select := _innerJoin.Left.(*SelectExpr) +// if _select != nil { // // ruleGen will also do a short-circuiting optimization designed to avoid // duplicate work for exploration rules. The *exploreState passed to each @@ -70,23 +70,22 @@ type contextDecl struct { // skipped. When this optimization is added to the above example, the code would // instead look more like this: // -// _partlyExplored := _innerJoinOrd < _innerJoinState.start -// _state := _e.lookupExploreState(_innerJoin.Left) -// var _member memo.RelExpr -// for _ord := 0; _ord < _state.end; _ord++ { -// if _member == nil { -// _member = _innerJoin.Left.FirstExpr() -// } else { -// _member = _member.NextExpr() -// } -// if !_partlyExplored || _ord >= _state.start { -// _select, _ := _member.(*SelectExpr) -// if _select != nil { +// _partlyExplored := _innerJoinOrd < _innerJoinState.start +// _state := _e.lookupExploreState(_innerJoin.Left) +// var _member memo.RelExpr +// for _ord := 0; _ord < _state.end; _ord++ { +// if _member == nil { +// _member = _innerJoin.Left.FirstExpr() +// } else { +// _member = _member.NextExpr() +// } +// if !_partlyExplored || _ord >= _state.start { +// _select, _ := _member.(*SelectExpr) +// if _select != nil { // // If the inner join expression has already been explored (i.e. if // _partlyExplored is true), then this logic only explores newly added Left // children. -// type newRuleGen struct { compiled *lang.CompiledExpr md *metadata @@ -188,16 +187,16 @@ func (g *newRuleGen) genRule(rule *lang.RuleExpr) { // that is currently being matched against. It also contains the type of that // expression. 
For example: // -// for i := range elems { -// _item := &elems[i] -// _eq := _item.(*memo.EqExpr) -// if _eq != nil { -// _const := _eq.Left.(*memo.ConstExpr) -// if _const != nil { -// ... -// } -// } -// } +// for i := range elems { +// _item := &elems[i] +// _eq := _item.(*memo.EqExpr) +// if _eq != nil { +// _const := _eq.Left.(*memo.ConstExpr) +// if _const != nil { +// ... +// } +// } +// } // // In that example, the context starts out as "elems", which is the top-level // Tuple operator field that's being list matched. Then, the context recursively @@ -286,13 +285,12 @@ func (g *newRuleGen) genMatch(match lang.Expr, context *contextDecl, noMatch boo // has any number of ListAnyOp children (produced by '...' syntax) arranged // around at most one non-ListAnyOp child. The following variants are possible: // -// match child in any position : [ ... ... ] -// match child in first position: [ ... ] -// match child in last position : [ ... ] -// match singleton list : [ ] -// match empty list : [ ] -// match any list : [ ... ] -// +// match child in any position : [ ... ... ] +// match child in first position: [ ... ] +// match child in last position : [ ... ] +// match singleton list : [ ] +// match empty list : [ ] +// match any list : [ ... ] func (g *newRuleGen) genMatchList(match *lang.ListExpr, context *contextDecl, noMatch bool) { // The list's type should already have been set by the Optgen compiler, and // should be the name of a list type. @@ -392,9 +390,9 @@ func (g *newRuleGen) genMatchList(match *lang.ListExpr, context *contextDecl, no // makeListItemRef returns a list item reference expression. Some list operators // inline children into the list slice, whereas others keep only pointers: // -// _item := &project.Projections[i] -// vs. -// _item := tuple.Elems[i] +// _item := &project.Projections[i] +// vs. +// _item := tuple.Elems[i] // // If the list item type has its own Optgen define, then it is a generated type, // and will be inlined in its owning list slice. Otherwise, the list slice is @@ -808,17 +806,16 @@ func (g *newRuleGen) genExploreReplace(define *lang.DefineExpr, rule *lang.RuleE // bound to variables. Those variables can be used when constructing other parts // of the result tree. 
For example: // -// (InnerJoin $left:* $right:* $on:*) -// => -// (InnerJoin -// $varname:(SomeFunc $left) -// $varname2:(Select $varname (SomeOtherFunc $right)) -// (MakeOn $varname $varname2) -// ) -// -// varname := _f.funcs.SomeFunc(left) -// varname2 := _f.ConstructSelect(varname, _f.funcs.SomeOtherFunc(right)) +// (InnerJoin $left:* $right:* $on:*) +// => +// (InnerJoin +// $varname:(SomeFunc $left) +// $varname2:(Select $varname (SomeOtherFunc $right)) +// (MakeOn $varname $varname2) +// ) // +// varname := _f.funcs.SomeFunc(left) +// varname2 := _f.ConstructSelect(varname, _f.funcs.SomeOtherFunc(right)) func (g *newRuleGen) genBoundStatements(e lang.Expr) { var visitFunc lang.VisitFunc visitFunc = func(e lang.Expr) lang.Expr { @@ -1026,11 +1023,10 @@ func (g *newRuleGen) genCustomFunc(customFunc *lang.CustomFuncExpr) { // genConstructList generates code to construct an interned list of items: // -// ProjectionsList{ -// _f.ConstructProjectionsItem(elem, 1), -// _f.ConstructProjectionsItem(elem2, 2), -// } -// +// ProjectionsList{ +// _f.ConstructProjectionsItem(elem, 1), +// _f.ConstructProjectionsItem(elem2, 2), +// } func (g *newRuleGen) genConstructList(list *lang.ListExpr) { // The list's type should already have been set by the Optgen compiler, and // should be the name of a list type. diff --git a/pkg/sql/opt/optgen/cmd/optgen/testdata/factory b/pkg/sql/opt/optgen/cmd/optgen/testdata/factory index 9c548f600cdb..d8d87ab19cb2 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/testdata/factory +++ b/pkg/sql/opt/optgen/cmd/optgen/testdata/factory @@ -200,14 +200,14 @@ func (_f *Factory) ConstructKVOptionsItem( // ancestors via a call to the corresponding factory Construct methods. Here // is example usage: // -// var replace func(e opt.Expr) opt.Expr -// replace = func(e opt.Expr) opt.Expr { -// if e.Op() == opt.VariableOp { -// return getReplaceVar(e) -// } -// return factory.Replace(e, replace) -// } -// replace(root, replace) +// var replace func(e opt.Expr) opt.Expr +// replace = func(e opt.Expr) opt.Expr { +// if e.Op() == opt.VariableOp { +// return getReplaceVar(e) +// } +// return factory.Replace(e, replace) +// } +// replace(root, replace) // // Here, all variables in the tree are being replaced by some other expression // in a pre-order traversal of the tree. Post-order traversal is trivially @@ -547,14 +547,14 @@ SKIP_RULES: // ancestors via a call to the corresponding factory Construct methods. Here // is example usage: // -// var replace func(e opt.Expr) opt.Expr -// replace = func(e opt.Expr) opt.Expr { -// if e.Op() == opt.VariableOp { -// return getReplaceVar(e) -// } -// return factory.Replace(e, replace) -// } -// replace(root, replace) +// var replace func(e opt.Expr) opt.Expr +// replace = func(e opt.Expr) opt.Expr { +// if e.Op() == opt.VariableOp { +// return getReplaceVar(e) +// } +// return factory.Replace(e, replace) +// } +// replace(root, replace) // // Here, all variables in the tree are being replaced by some other expression // in a pre-order traversal of the tree. Post-order traversal is trivially @@ -704,14 +704,14 @@ SKIP_RULES: // ancestors via a call to the corresponding factory Construct methods. 
Here // is example usage: // -// var replace func(e opt.Expr) opt.Expr -// replace = func(e opt.Expr) opt.Expr { -// if e.Op() == opt.VariableOp { -// return getReplaceVar(e) -// } -// return factory.Replace(e, replace) -// } -// replace(root, replace) +// var replace func(e opt.Expr) opt.Expr +// replace = func(e opt.Expr) opt.Expr { +// if e.Op() == opt.VariableOp { +// return getReplaceVar(e) +// } +// return factory.Replace(e, replace) +// } +// replace(root, replace) // // Here, all variables in the tree are being replaced by some other expression // in a pre-order traversal of the tree. Post-order traversal is trivially @@ -898,14 +898,14 @@ SKIP_RULES: // ancestors via a call to the corresponding factory Construct methods. Here // is example usage: // -// var replace func(e opt.Expr) opt.Expr -// replace = func(e opt.Expr) opt.Expr { -// if e.Op() == opt.VariableOp { -// return getReplaceVar(e) -// } -// return factory.Replace(e, replace) -// } -// replace(root, replace) +// var replace func(e opt.Expr) opt.Expr +// replace = func(e opt.Expr) opt.Expr { +// if e.Op() == opt.VariableOp { +// return getReplaceVar(e) +// } +// return factory.Replace(e, replace) +// } +// replace(root, replace) // // Here, all variables in the tree are being replaced by some other expression // in a pre-order traversal of the tree. Post-order traversal is trivially @@ -1158,14 +1158,14 @@ SKIP_RULES: // ancestors via a call to the corresponding factory Construct methods. Here // is example usage: // -// var replace func(e opt.Expr) opt.Expr -// replace = func(e opt.Expr) opt.Expr { -// if e.Op() == opt.VariableOp { -// return getReplaceVar(e) -// } -// return factory.Replace(e, replace) -// } -// replace(root, replace) +// var replace func(e opt.Expr) opt.Expr +// replace = func(e opt.Expr) opt.Expr { +// if e.Op() == opt.VariableOp { +// return getReplaceVar(e) +// } +// return factory.Replace(e, replace) +// } +// replace(root, replace) // // Here, all variables in the tree are being replaced by some other expression // in a pre-order traversal of the tree. Post-order traversal is trivially @@ -1478,14 +1478,14 @@ SKIP_RULES: // ancestors via a call to the corresponding factory Construct methods. Here // is example usage: // -// var replace func(e opt.Expr) opt.Expr -// replace = func(e opt.Expr) opt.Expr { -// if e.Op() == opt.VariableOp { -// return getReplaceVar(e) -// } -// return factory.Replace(e, replace) -// } -// replace(root, replace) +// var replace func(e opt.Expr) opt.Expr +// replace = func(e opt.Expr) opt.Expr { +// if e.Op() == opt.VariableOp { +// return getReplaceVar(e) +// } +// return factory.Replace(e, replace) +// } +// replace(root, replace) // // Here, all variables in the tree are being replaced by some other expression // in a pre-order traversal of the tree. Post-order traversal is trivially diff --git a/pkg/sql/opt/optgen/cmd/optgen/utils.go b/pkg/sql/opt/optgen/cmd/optgen/utils.go index 9cc58b264433..da9a62084b7a 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/utils.go +++ b/pkg/sql/opt/optgen/cmd/optgen/utils.go @@ -61,8 +61,9 @@ func expandFields(compiled *lang.CompiledExpr, define *lang.DefineExpr) lang.Def // converting an Optgen comment to a Go comment. The comments are assumed to // start with the name of an op or field and follow with a description, similar // to this: -// # -// # ... +// +// # +// # ... // // The initial name is replaced with the given replaceName, in order to adapt // it to different enums and structs that are generated. 
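The Replace usage comment repeated throughout the factory testdata above describes a pre-order rewrite driven by a recursive closure. The same shape can be shown on a toy expression tree; the Expr, Var, and Concat types and the Replace helper below are assumptions made for illustration, not the opt or memo APIs:

    package main

    import "fmt"

    type Expr interface{ String() string }

    type Var struct{ Name string }
    type Concat struct{ Left, Right Expr }

    func (v *Var) String() string    { return v.Name }
    func (c *Concat) String() string { return fmt.Sprintf("(%s || %s)", c.Left, c.Right) }

    // Replace rebuilds e with each child passed through fn, mirroring the
    // generated Factory.Replace: if no child changes, the original node is
    // returned unchanged.
    func Replace(e Expr, fn func(Expr) Expr) Expr {
        if c, ok := e.(*Concat); ok {
            left, right := fn(c.Left), fn(c.Right)
            if left != c.Left || right != c.Right {
                return &Concat{Left: left, Right: right}
            }
        }
        return e
    }

    func main() {
        root := &Concat{
            Left:  &Var{Name: "x"},
            Right: &Concat{Left: &Var{Name: "y"}, Right: &Var{Name: "x"}},
        }

        // Pre-order traversal: rewrite every variable named "x", recurse otherwise.
        var replace func(e Expr) Expr
        replace = func(e Expr) Expr {
            if v, ok := e.(*Var); ok && v.Name == "x" {
                return &Var{Name: "x0"}
            }
            return Replace(e, replace)
        }
        fmt.Println(replace(root)) // (x0 || (y || x0))
    }
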
diff --git a/pkg/sql/opt/optgen/exprgen/custom_funcs.go b/pkg/sql/opt/optgen/exprgen/custom_funcs.go index 50e32797d51b..a52da41989f7 100644 --- a/pkg/sql/opt/optgen/exprgen/custom_funcs.go +++ b/pkg/sql/opt/optgen/exprgen/custom_funcs.go @@ -184,11 +184,11 @@ func (c *customFuncs) substituteCols(str string) string { // evaluated in order, and we want to be able to refer to the lookup columns in // the ON expression. For example: // -// (MakeLookupJoin -// (Scan [ (Table "def") (Cols "d,e") ]) -// [ (JoinType "left-join") (Table "abc") (Index "abc@ab") (KeyCols "a") (Cols "a,b") ] -// [ (Gt (Var "a") (Var "e")) ] -// ) +// (MakeLookupJoin +// (Scan [ (Table "def") (Cols "d,e") ]) +// [ (JoinType "left-join") (Table "abc") (Index "abc@ab") (KeyCols "a") (Cols "a,b") ] +// [ (Gt (Var "a") (Var "e")) ] +// ) // // If the order of the last two was swapped, we wouldn't be able to look up // column a. @@ -228,11 +228,12 @@ func (c *customFuncs) NoOrdering() props.OrderingChoice { // Root can be used only at the top level on an expression, to annotate the // root with a presentation and/or required ordering. The operator must be able // to provide the ordering. For example: -// (Root -// ( ... ) -// (Presentation "a,b") -// (OrderingChoice "+a") -// ) +// +// (Root +// ( ... ) +// (Presentation "a,b") +// (OrderingChoice "+a") +// ) func (c *customFuncs) Root( root memo.RelExpr, presentation physical.Presentation, ordering props.OrderingChoice, ) *rootSentinel { diff --git a/pkg/sql/opt/optgen/exprgen/expr_gen.go b/pkg/sql/opt/optgen/exprgen/expr_gen.go index 8d51a1a88520..1759cd5e7e99 100644 --- a/pkg/sql/opt/optgen/exprgen/expr_gen.go +++ b/pkg/sql/opt/optgen/exprgen/expr_gen.go @@ -36,43 +36,44 @@ import ( // // For example, if the input is "(Eq (Const 1) (Const 2))", the output is the // corresponding expression tree: -// eq [type=bool] -// ├── const: 1 [type=int] -// └── const: 2 [type=int] +// +// eq [type=bool] +// ├── const: 1 [type=int] +// └── const: 2 [type=int] // // There are some peculiarities compared to the usual opt-gen replace syntax: // -// - Filters are specified as simply [ ... ]; no FiltersItem is -// necessary. +// - Filters are specified as simply [ ... ]; no FiltersItem is +// necessary. // -// - Various implicit conversions are allowed for convenience, e.g. list of -// columns to ColList/ColSet. +// - Various implicit conversions are allowed for convenience, e.g. list of +// columns to ColList/ColSet. // -// - Operation privates (e.g. ScanPrivate) are specified as lists of fields -// of the form [ (FiledName ) ]. For example: -// [ (Table "abc") (Index "abc@ab") (Cols "a,b") ] -// Implicit conversions are allowed here for column lists, orderings, etc. +// - Operation privates (e.g. ScanPrivate) are specified as lists of fields +// of the form [ (FiledName ) ]. For example: +// [ (Table "abc") (Index "abc@ab") (Cols "a,b") ] +// Implicit conversions are allowed here for column lists, orderings, etc. // -// - A Root custom function is used to set the physical properties of the root. -// Setting the physical properties (in particular the presentation) is always -// necessary for the plan to be run via PREPARE .. AS OPT PLAN '..'. +// - A Root custom function is used to set the physical properties of the root. +// Setting the physical properties (in particular the presentation) is always +// necessary for the plan to be run via PREPARE .. AS OPT PLAN '..'. 
// // Some examples of valid inputs: -// (Tuple [ (True) (False) ] "tuple{bool, bool}" ) // -// (Root -// (Scan [ (Table "abc") (Index "abc@ab") (Cols "a,b") ]) -// (Presentation "a,b") -// (OrderingChoice "+a,+b") -// ) +// (Tuple [ (True) (False) ] "tuple{bool, bool}" ) // -// (Select -// (Scan [ (Table "abc") (Cols "a,b,c") ]) -// [ (Eq (Var "a") (Const 1)) ] -// ) +// (Root +// (Scan [ (Table "abc") (Index "abc@ab") (Cols "a,b") ]) +// (Presentation "a,b") +// (OrderingChoice "+a,+b") +// ) // -// For more examples, see the various testdata/ files. +// (Select +// (Scan [ (Table "abc") (Cols "a,b,c") ]) +// [ (Eq (Var "a") (Const 1)) ] +// ) // +// For more examples, see the various testdata/ files. func Build(catalog cat.Catalog, factory *norm.Factory, input string) (_ opt.Expr, err error) { return buildAndOptimize(catalog, nil /* optimizer */, factory, input) } diff --git a/pkg/sql/opt/optgen/exprgen/private.go b/pkg/sql/opt/optgen/exprgen/private.go index 5f925bacd8b7..546d1b630910 100644 --- a/pkg/sql/opt/optgen/exprgen/private.go +++ b/pkg/sql/opt/optgen/exprgen/private.go @@ -28,16 +28,17 @@ import ( ) // evalPrivate evaluates a list of the form -// [ (FieldName ) ... ] +// +// [ (FieldName ) ... ] +// // into an operation private of the given type (e.g. ScanPrivate, etc). // // Various implicit conversions are supported. Examples: -// - table ID: "table" -// - index ordinal: "table@index" -// - column lists or sets: "a,b,c" -// - orderings and ordering choices: "+a,-b" -// - operators: "inner-join" -// +// - table ID: "table" +// - index ordinal: "table@index" +// - column lists or sets: "a,b,c" +// - orderings and ordering choices: "+a,-b" +// - operators: "inner-join" func (eg *exprGen) evalPrivate(privType reflect.Type, expr lang.Expr) interface{} { if expr.Op() != lang.ListOp { panic(errorf("private must be a list of the form [ (FieldName Value) ... ]")) diff --git a/pkg/sql/opt/optgen/lang/compiler.go b/pkg/sql/opt/optgen/lang/compiler.go index 4dcaf7368193..863abf42a980 100644 --- a/pkg/sql/opt/optgen/lang/compiler.go +++ b/pkg/sql/opt/optgen/lang/compiler.go @@ -55,8 +55,9 @@ func (c *CompiledExpr) LookupMatchingDefines(name string) DefineSetExpr { // LookupMatchingRules returns the set of rules that match the given opname at // the top-level, or nil if none do. For example, "InnerJoin" would match this // rule: -// [CommuteJoin] -// (InnerJoin $r:* $s:*) => (InnerJoin $s $r) +// +// [CommuteJoin] +// (InnerJoin $r:* $s:*) => (InnerJoin $s $r) func (c *CompiledExpr) LookupMatchingRules(name string) RuleSetExpr { return c.matchIndex[name] } @@ -748,10 +749,11 @@ func (c *ruleContentCompiler) compileOpName(fn *FuncExpr) (_ Expr, ok bool) { // addDisallowedErr creates an error prefixed by one of the following strings, // depending on the context: -// match pattern -// replace pattern -// custom match function -// custom replace function +// +// match pattern +// replace pattern +// custom match function +// custom replace function func (c *ruleContentCompiler) addDisallowedErr(loc Expr, disallowed string) { if c.matchPattern { if c.customFunc { diff --git a/pkg/sql/opt/optgen/lang/data_type.go b/pkg/sql/opt/optgen/lang/data_type.go index 5773e086cc1a..0c685527477b 100644 --- a/pkg/sql/opt/optgen/lang/data_type.go +++ b/pkg/sql/opt/optgen/lang/data_type.go @@ -15,7 +15,7 @@ import "bytes" // AnyDataType is a data type about which nothing is known, and so could be any // data type. 
Among other uses, it is assigned to custom functions: // -// (Scan $def:*) => (ConstrainScan $def) +// (Scan $def:*) => (ConstrainScan $def) // // The ConstrainScan custom function has the AnyDataType assigned to it, since // the return type of the function is not known. @@ -24,7 +24,7 @@ var AnyDataType = &ExternalDataType{Name: ""} // ListDataType indicates that a pattern matches or constructs a list of // expressions. For example: // -// (Tuple $list:[ $item:* ]) => $item +// (Tuple $list:[ $item:* ]) => $item // // The $list binding will have the ListDataType. var ListDataType = &ExternalDataType{Name: ""} @@ -46,7 +46,7 @@ type DataType interface { // DefineSetDataType indicates that a pattern matches or constructs one of // several possible defined operators. For example: // -// (Eq | Ne $left:* $right:*) => (True) +// (Eq | Ne $left:* $right:*) => (True) // // The top-level match pattern would have a DefineSetDataType that referenced // the defines for the Eq and Ne operators. @@ -78,11 +78,11 @@ func (d *DefineSetDataType) String() string { // ExternalDataType indicates that a pattern matches or constructs a non- // operator type referenced in a Define. For example: // -// define Scan { -// Def ScanDef -// } +// define Scan { +// Def ScanDef +// } // -// (Scan $def:*) => (ConstrainScan $def) +// (Scan $def:*) => (ConstrainScan $def) // // Here, $def will have an ExternalDataType with Name equal to "ScanDef". type ExternalDataType struct { diff --git a/pkg/sql/opt/optgen/lang/doc.go b/pkg/sql/opt/optgen/lang/doc.go index 7164d28c352f..e705d051b5ae 100644 --- a/pkg/sql/opt/optgen/lang/doc.go +++ b/pkg/sql/opt/optgen/lang/doc.go @@ -14,13 +14,13 @@ generator". Optgen is a domain-specific language (DSL) that provides an intuitive syntax for defining, matching, and replacing nodes in a target expression tree. Here is an example: - [NormalizeEq] - (Eq - $left:^(Variable) - $right:(Variable) - ) - => - (Eq $right $left) + [NormalizeEq] + (Eq + $left:^(Variable) + $right:(Variable) + ) + => + (Eq $right $left) The expression above the arrow is called the "match pattern" and the expression below the arrow is called the "replace pattern". If a node in the target @@ -33,15 +33,15 @@ definition names and describes one of the nodes that the target expression tree may contain. Match and replace patterns can recognize and construct these nodes. Here is an example: - define Eq { - Left Expr - Right Expr - } + define Eq { + Left Expr + Right Expr + } The following sections provide more detail on the Optgen language syntax and semantics, as well as some implementation notes. -Definitions +# Definitions Optgen language input files may contain any number of definitions, in any order. Each definition describes a node in the target expression tree. A @@ -57,21 +57,21 @@ other operator or group of operators. Here is the syntax for an operator definition: - define { - - - ... - } + define { + + + ... + } And here is an example: - define Join { - Left Expr - Right Expr - On Expr - } + define Join { + Left Expr + Right Expr + On Expr + } -Definition Tags +# Definition Tags A "definition tag" is an opaque identifier that describes some property of the defined node. Definitions can have multiple tags or no tags at all, and the @@ -83,19 +83,19 @@ Names" section). Here is the definition tagging syntax: - [, , ...] - define { - } + [, , ...] 
+ define { + } And here is an example: - [Comparison, Inequality] - define Lt { - Left Expr - Right Expr - } + [Comparison, Inequality] + define Lt { + Left Expr + Right Expr + } -Rules +# Rules Optgen language input files may contain any number of rules, in any order. Each rule has a unique name and consists of a match pattern and a corresponding @@ -109,7 +109,6 @@ or low priority as it is depicted below: [InlineConstVar, Normalize, HighPriority] - Note that this is just a conceptual description. Optgen does not actually do any of this matching or replacing itself. Other components use the Optgen library to generate code. These components are free to match however they want, @@ -122,37 +121,37 @@ generator. Here is the partial rule syntax (see Syntax section for full syntax): - [, , , ...] - ( - - - ... - ) - => - ( - - - ... - ) - -Match Patterns + [, , , ...] + ( + + + ... + ) + => + ( + + + ... + ) + +# Match Patterns The top-level match pattern matches the name and children of one or more nodes in the target expression tree. For example: - (Eq * *) + (Eq * *) The "*" character is the "wildcard matcher", which matches a child of any kind. Therefore, this pattern matches any node named "Eq" that has at least two children. Matchers can be nested within one another in order to match children, grandchildren, etc. For example: - (Eq (Variable) (Const)) + (Eq (Variable) (Const)) This pattern matches an "Eq" node with a "Variable" node as its left child and a "Const" node as its right child. -Binding +# Binding Child patterns within match and replace patterns can be "bound" to a named variable. These variables can then be referenced later in the match pattern or @@ -160,15 +159,15 @@ in the replace pattern. This is a critical part of the Optgen language, since virtually every pattern constructs its replacement pattern based on parts of the match pattern. For example: - [EliminateSelect] - (Select $input:* (True)) => $input + [EliminateSelect] + (Select $input:* (True)) => $input The $input variable is bound to the first child of the "Select" node. If the second child is a "True" node, then the "Select" node will be replaced by its input. Variables can also be passed as arguments to custom matchers, which are described below. -Let Expression +# Let Expression A let expression can be used for binding multiple variables to the result of a custom function with multiple return values. This expression consists of two @@ -178,14 +177,14 @@ variable reference which is the value of the expression when evaluated. For example: - [SplitSelect] - (Select - $input:* - $filters:* & - (Let ($filterA $filterB $ok):(SplitFilters $filters) $ok) - ) - => - (Select (Select $input $filterA) $filterB) + [SplitSelect] + (Select + $input:* + $filters:* & + (Let ($filterA $filterB $ok):(SplitFilters $filters) $ok) + ) + => + (Select (Select $input $filterA) $filterB) The "($filtersA $filtersB $ok):(SplitFilters $filters)" part indicates that $filtersA $filtersB and $ok are bound to the three return values of @@ -193,33 +192,33 @@ $filtersA $filtersB and $ok are bound to the three return values of A let expression can also be used in a replace pattern. 
For example: - [AlterSelect] - (Select $input:* $filters:*) - => - (Select - (Let ($newInput $newFilters):(AlterSelect $input $filters) $newInput) - $newFilters - ) + [AlterSelect] + (Select $input:* $filters:*) + => + (Select + (Let ($newInput $newFilters):(AlterSelect $input $filters) $newInput) + $newFilters + ) -Matching Names +# Matching Names In addition to simple name matching, a node matcher can match tag names. Any node type which has the named tag is matched. For example: - [Inequality] - define Lt { - Left Expr - Right Expr - } + [Inequality] + define Lt { + Left Expr + Right Expr + } - [Inequality] - define Gt - { - Left Expr - Right Expr - } + [Inequality] + define Gt + { + Left Expr + Right Expr + } - (Inequality (Variable) (Const)) + (Inequality (Variable) (Const)) This pattern matches either "Lt" or "Gt" nodes. This is useful for writing patterns that apply to multiple kinds of nodes, without need for duplicate @@ -229,87 +228,87 @@ The node matcher also supports multiple names in the match list, separated by '|' characters. The node's name is allowed to match any of the names in the list. For example: - (Eq | Ne | Inequality) + (Eq | Ne | Inequality) This pattern matches "Eq", "Ne", "Lt", or "Gt" nodes. -Matching Primitive Types +# Matching Primitive Types String and numeric constant nodes in the tree can be matched against literals. A literal string or number in a match pattern is interpreted as a matcher of that type, and will be tested for equality with the child node. For example: - [EliminateConcat] - (Concat $left:* (Const "")) => $left + [EliminateConcat] + (Concat $left:* (Const "")) => $left If Concat's right operand is a constant expression with the empty string as its value, then the pattern matches. Similarly, a constant numeric expression can be matched like this: - [LimitScan] - (Limit (Scan $def:*) (Const 1)) => (ScanOneRow $def) + [LimitScan] + (Limit (Scan $def:*) (Const 1)) => (ScanOneRow $def) -Matching Lists +# Matching Lists Nodes can have a child that is a list of nodes rather than a single node. As an example, a function call node might have two children: the name of the function and the list of arguments to the function: - define FuncCall { - Name Expr - Args ExprList - } + define FuncCall { + Name Expr + Args ExprList + } There are several kinds of list matchers, each of which uses a variant of the list matching bracket syntax. The ellipses signify that 0 or more items can match at either the beginning or end of the list. The item pattern can be any legal match pattern, and can be bound to a variable. - [ ... ... ] + [ ... ... ] - ANY: Matches if any item in the list matches the item pattern. If multiple items match, then the list matcher binds to the first match. - [ ... $item:* ... ] + [ ... $item:* ... ] - FIRST: Matches if the first item in the list matches the item pattern (and there is at least one item in the list). - [ $item:* ... ] + [ $item:* ... ] - LAST: Matches if the last item in the list matches the item pattern (and there is at least one item). - [ ... $item:* ] + [ ... $item:* ] - SINGLE: Matches if there is exactly one item in the list, and it matches the item pattern. - [ $item:* ] + [ $item:* ] - EMPTY: Matches if there are zero items in the list. - [] + [] Following is a more complete example. The ANY list matcher in the example searches through the Filter child's list, looking for a Subquery node. If a matching node is found, then the list matcher succeeds and binds the node to the $item variable. 
- (Select - $input:* - (Filter [ ... $item:(Subquery) ... ]) - ) + (Select + $input:* + (Filter [ ... $item:(Subquery) ... ]) + ) -Custom Matching +# Custom Matching When the supported matching syntax is insufficient, Optgen provides an escape mechanism. Custom matching functions can invoke Go functions, passing previously bound variables as arguments, and checking the boolean result for a match. For example: - [EliminateFilters] - (Filters $items:* & (IsEmptyList $items)) => (True) + [EliminateFilters] + (Filters $items:* & (IsEmptyList $items)) => (True) This pattern passes the $items child node to the IsEmptyList function. If that returns true, then the pattern matches. @@ -319,33 +318,33 @@ be combined with other matchers using boolean operators (see the Boolean Expressions section for more details). While variable references are the most common argument, it is also legal to nest function invocations: - (Project - $input:* - $projections:* & ^(IsEmpty (FindUnusedColumns $projections)) - ) + (Project + $input:* + $projections:* & ^(IsEmpty (FindUnusedColumns $projections)) + ) -Boolean Expressions +# Boolean Expressions Multiple match expressions of any type can be combined using the boolean & (AND) operator. All must match in order for the overall match to succeed: - (Not - $input:(Comparison) & (Inequality) & (CanInvert $input) - ) + (Not + $input:(Comparison) & (Inequality) & (CanInvert $input) + ) The boolean ^ (NOT) operator negates the result of a boolean match expression. It can be used with any kind of matcher, including custom match functions: - (JoinApply - $left:^(Select) - $right:* & ^(IsCorrelated $right $left) - $on:* - ) + (JoinApply + $left:^(Select) + $right:* & ^(IsCorrelated $right $left) + $on:* + ) This pattern matches only if the left child is not a Select node, and if the IsCorrelated custom function returns false. -Replace Patterns +# Replace Patterns Once a matching node is found, the replace pattern produces a single substitution node. The most common replace pattern involves constructing one or @@ -354,17 +353,17 @@ A construction expression specifies the name of the node as its first operand and its children as subsequent arguments. Construction expressions can be nested within one another to any depth. For example: - [HoistSelectExists] - (Select - $input:* - $filter:(Exists $subquery:*) - ) - => - (SemiJoinApply - $input - $subquery - (True) - ) + [HoistSelectExists] + (Select + $input:* + $filter:(Exists $subquery:*) + ) + => + (SemiJoinApply + $input + $subquery + (True) + ) The replace pattern in this rule constructs a new SemiJoinApply node, with its first two children bound in the match pattern. The third child is a newly @@ -373,27 +372,27 @@ constructed True node. The replace pattern can also consist of a single variable reference, in the case where the substitution node was already present in the match pattern: - [EliminateAnd] - (And $left:* (True)) => $left + [EliminateAnd] + (And $left:* (True)) => $left -Custom Construction +# Custom Construction When Optgen syntax cannot easily produce a result, custom construction functions allow results to be derived in Go code. If a construction expression's name is not recognized as a node name, then it is assumed to be the name of a custom function. 
For example: - [MergeSelectJoin] - (Select - (InnerJoin $r:* $s:* $on:*) - $filter:* - ) - => - (InnerJoin - $r - $s - (ConcatFilters $on $filter) - ) + [MergeSelectJoin] + (Select + (InnerJoin $r:* $s:* $on:*) + $filter:* + ) + => + (InnerJoin + $r + $s + (ConcatFilters $on $filter) + ) Here, the ConcatFilters custom function is invoked in order to concatenate two filter lists together. Function parameters can include nodes, lists (see the @@ -402,38 +401,38 @@ and the results of nested custom function calls. While custom functions typically return a node, they can return other types if they are parameters to other custom functions. -Constructing Lists +# Constructing Lists Lists can be constructed and passed as parameters to node construction expressions or custom replace functions. A list consists of multiple items that can be of any parameter type, including nodes, strings, custom function invocations, or lists. Here is an example: - [MergeSelectJoin] - (Select - (InnerJoin $left:* $right:* $on:*) - $filters:* - ) - => - (InnerJoin - $left - $right - (And [$on $filters]) - ) + [MergeSelectJoin] + (Select + (InnerJoin $left:* $right:* $on:*) + $filters:* + ) + => + (InnerJoin + $left + $right + (And [$on $filters]) + ) -Dynamic Construction +# Dynamic Construction Sometimes the name of a constructed node can be one of several choices. The built-in "OpName" function can be used to dynamically construct the right kind of node. For example: - [NormalizeVar] - (Eq | Ne - $left:^(Variable) - $right:(Variable) - ) - => - ((OpName) $right $left) + [NormalizeVar] + (Eq | Ne + $left:^(Variable) + $right:(Variable) + ) + => + ((OpName) $right $left) In this pattern, the name of the constructed result is either Eq or Ne, depending on which is matched. When the OpName function has no arguments, then @@ -441,39 +440,39 @@ it is bound to the name of the node matched at the top-level. The OpName function can also take a single variable reference argument. In that case, it refers to the name of the node bound to that variable: - [PushDownSelect] - (Select - $input:(Join $left:* $right:* $on:*) - $filter:* & ^(IsCorrelated $filter $right) - ) - => - ((OpName $input) - (Select $left $filter) - $right - $on - ) + [PushDownSelect] + (Select + $input:(Join $left:* $right:* $on:*) + $filter:* & ^(IsCorrelated $filter $right) + ) + => + ((OpName $input) + (Select $left $filter) + $right + $on + ) In this pattern, Join is a tag that refers to a group of nodes. The replace expression will construct a node having the same name as the matched join node. -Name Parameters +# Name Parameters The OpName built-in function can also be a parameter to a custom match or replace function which needs to know which name matched. For example: - [FoldBinaryNull] - (Binary $left:* (Null) & ^(IsCalledOnNullInput (OpName))) - => - (Null) + [FoldBinaryNull] + (Binary $left:* (Null) & ^(IsCalledOnNullInput (OpName))) + => + (Null) The name of the matched Binary node (e.g. Plus, In, Contains) is passed to the IsCalledOnNullInput function as a symbolic identifier. Here is an example that uses a custom replace function and the OpName function with an argument: - [NegateComparison] - (Not $input:(Comparison $left:* $right:*)) - => - (InvertComparison (OpName $input) $left $right) + [NegateComparison] + (Not $input:(Comparison $left:* $right:*)) + => + (InvertComparison (OpName $input) $left $right) As described in the previous section, adding the argument enables OpName to return a name that was matched deeper in the pattern. 
@@ -482,26 +481,26 @@ In addition to a name returned by the OpName function, custom match and replace functions can accept literal operator names as parameters. The Minus operator name is passed as a parameter to two functions in this example: - [FoldMinus] - (UnaryMinus - (Minus $left $right) & (OverloadExists Minus $right $left) - ) - => - (ConstructBinary Minus $right $left) + [FoldMinus] + (UnaryMinus + (Minus $left $right) & (OverloadExists Minus $right $left) + ) + => + (ConstructBinary Minus $right $left) -Type Inference +# Type Inference Expressions in both the match and replace patterns are assigned a data type that describes the kind of data that will be returned by the expression. These types are inferred using a combination of top-down and bottom-up type inference rules. For example: - define Select { - Input Expr - Filter Expr - } + define Select { + Input Expr + Filter Expr + } - (Select $input:(LeftJoin | RightJoin) $filter:*) => $input + (Select $input:(LeftJoin | RightJoin) $filter:*) => $input The type of $input is inferred as "LeftJoin | RightJoin" by bubbling up the type of the bound expression. That type is propagated to the $input reference in the @@ -513,7 +512,7 @@ When multiple types are inferred for an expression using different type inference rules, the more restrictive type is assigned to the expression. For example: - (Select $input:* & (LeftJoin)) => $input + (Select $input:* & (LeftJoin)) => $input Here, the left input to the And expression was inferred to have type "Expr" and the right input to have type "LeftJoin". Since "LeftJoin" is the more @@ -523,12 +522,12 @@ restrictive type, the And expression and the $input binding are typed as Type inference detects and reports type contradictions, which occur when multiple incompatible types are inferred for an expression. For example: - (Select $input:(InnerJoin) & (LeftJoin)) => $input + (Select $input:(InnerJoin) & (LeftJoin)) => $input Because the input cannot be both an InnerJoin and a LeftJoin, Optgen reports a type contradiction error. -Syntax +# Syntax This section describes the Optgen language syntax in a variant of extended Backus-Naur form. The non-terminals correspond to methods in the parser. The @@ -536,49 +535,49 @@ terminals correspond to tokens returned by the scanner. Whitespace and comment tokens can be freely interleaved between other tokens in the grammar. - root = tags (define | rule) - tags = '[' IDENT (',' IDENT)* ']' - - define = 'define' define-name '{' define-field* '}' - define-name = IDENT - define-field = field-name field-type - field-name = IDENT - field-type = IDENT - - rule = func '=>' replace - match = func - replace = func | ref - func = '(' func-name arg* ')' - func-name = names | func - names = name ('|' name)* - arg = bind and | ref | and - and = expr ('&' and) - expr = func | not | let | list | any | name | STRING | NUMBER - not = '^' expr - list = '[' list-child* ']' - list-child = list-any | arg - list-any = '...' 
- bind = '$' label ':' and - let = '(' 'Let' '(' '$' label ('$' label)* ')' ':' func ref ')' - ref = '$' label - any = '*' - name = IDENT - label = IDENT + root = tags (define | rule) + tags = '[' IDENT (',' IDENT)* ']' + + define = 'define' define-name '{' define-field* '}' + define-name = IDENT + define-field = field-name field-type + field-name = IDENT + field-type = IDENT + + rule = func '=>' replace + match = func + replace = func | ref + func = '(' func-name arg* ')' + func-name = names | func + names = name ('|' name)* + arg = bind and | ref | and + and = expr ('&' and) + expr = func | not | let | list | any | name | STRING | NUMBER + not = '^' expr + list = '[' list-child* ']' + list-child = list-any | arg + list-any = '...' + bind = '$' label ':' and + let = '(' 'Let' '(' '$' label ('$' label)* ')' ':' func ref ')' + ref = '$' label + any = '*' + name = IDENT + label = IDENT Here are the pseudo-regex definitions for the lexical tokens that aren't represented as single-quoted strings above: - STRING = " [^"\n]* " - NUMBER = UnicodeDigit+ - IDENT = (UnicodeLetter | '_') (UnicodeLetter | '_' | UnicodeNumber)* - COMMENT = '#' .* \n - WHITESPACE = UnicodeSpace+ + STRING = " [^"\n]* " + NUMBER = UnicodeDigit+ + IDENT = (UnicodeLetter | '_') (UnicodeLetter | '_' | UnicodeNumber)* + COMMENT = '#' .* \n + WHITESPACE = UnicodeSpace+ The support directory contains syntax coloring files for several editors, including Vim, TextMate, and Visual Studio Code. JetBrains editor (i.e. GoLand) can also import TextMate bundles to provide syntax coloring. -Components +# Components The lang package contains a scanner that breaks input files into lexical tokens, a parser that assembles an abstract syntax tree (AST) from the tokens, diff --git a/pkg/sql/opt/optgen/lang/expr.go b/pkg/sql/opt/optgen/lang/expr.go index 79101755cf2a..210877fe8932 100644 --- a/pkg/sql/opt/optgen/lang/expr.go +++ b/pkg/sql/opt/optgen/lang/expr.go @@ -183,9 +183,9 @@ func (e RuleSetExpr) Sort(less func(left, right *RuleExpr) bool) { // construct several different operators; which it constructs is not known until // runtime. For example: // -// (Select $input:(Left | InnerJoin $left:* $right:* $on)) -// => -// ((OpName $input) $left $right $on) +// (Select $input:(Left | InnerJoin $left:* $right:* $on)) +// => +// ((OpName $input) $left $right $on) // // The replace pattern uses a constructor function that dynamically constructs // either a Left or InnerJoin operator. diff --git a/pkg/sql/opt/ordering/doc.go b/pkg/sql/opt/ordering/doc.go index 639c44f0c25d..244127609add 100644 --- a/pkg/sql/opt/ordering/doc.go +++ b/pkg/sql/opt/ordering/doc.go @@ -16,7 +16,7 @@ their children, etc. The package provides generic APIs that can be called on any RelExpr, as well as operator-specific APIs in some cases. -Required orderings +# Required orderings A Required ordering is part of the physical properties with respect to which an expression was optimized. It effectively describes a set of orderings, any of @@ -30,7 +30,7 @@ way. This package implements the logic that decides whether each operator can provide a Required ordering, as well as what Required orderings on its input(s) are necessary. -Provided orderings +# Provided orderings In a single-node serial execution model, the Required ordering would be sufficient to configure execution. But in a distributed setting, even if an @@ -41,7 +41,9 @@ single node. We must know exactly what order must be maintained on the streams (i.e. along which columns we should perform the comparisons). 
Consider a Scan operator that is scanning an index on a,b. In query: - SELECT a, b FROM abc ORDER BY a, b + + SELECT a, b FROM abc ORDER BY a, b + the Scan has Required ordering "+a,+b". Now consider another case where (as part of some more complicated query) we have the same Scan operator but with Required ordering "+b opt(a)"¹, which means that any of "+b", "+b,±a", "±a,+b" are diff --git a/pkg/sql/opt/ordering/lookup_join.go b/pkg/sql/opt/ordering/lookup_join.go index 3ed6f0667fa7..4cc5a26c1b98 100644 --- a/pkg/sql/opt/ordering/lookup_join.go +++ b/pkg/sql/opt/ordering/lookup_join.go @@ -208,9 +208,11 @@ func lookupJoinBuildProvided(expr memo.RelExpr, required *props.OrderingChoice) // implementation details, currently the ordering columns from the index must be // ASC. The following is a case where a lookup join could maintain an ordering // over both input and index columns: -// CREATE TABLE ab (a INT, b INT, PRIMARY KEY(a, b)); -// CREATE TABLE xyz (x INT, y INT, z INT, PRIMARY KEY(x, y, z DESC)); -// SELECT * FROM ab INNER LOOKUP JOIN xy ON a = x ORDER BY a, b, x, y; +// +// CREATE TABLE ab (a INT, b INT, PRIMARY KEY(a, b)); +// CREATE TABLE xyz (x INT, y INT, z INT, PRIMARY KEY(x, y, z DESC)); +// SELECT * FROM ab INNER LOOKUP JOIN xy ON a = x ORDER BY a, b, x, y; +// // Note that in this example the 'a' and 'b' columns form a key over the // input of the lookup join. Additionally, the 'x' column alone is not a key // for the 'xy' table, so each lookup may return multiple rows (which need diff --git a/pkg/sql/opt/ordering/row_number.go b/pkg/sql/opt/ordering/row_number.go index 810df92b169a..eaf6e85241d9 100644 --- a/pkg/sql/opt/ordering/row_number.go +++ b/pkg/sql/opt/ordering/row_number.go @@ -35,9 +35,10 @@ func ordinalityCanProvideOrdering(expr memo.RelExpr, required *props.OrderingCho // (i.e. OrdinalityPrivate.Ordering) is also ordered by the ordinality column. // For example, if the internal ordering is +a,+b, then the ord column numbers // rows in the +a,+b order and any of these required orderings can be provided: -// +ord -// +a,+ord -// +a,+b,+ord +// +// +ord +// +a,+ord +// +a,+b,+ord // // As long as normalization rules are enabled, they will have already reduced // the ordering required of this operator to take into account that the diff --git a/pkg/sql/opt/partialidx/implicator.go b/pkg/sql/opt/partialidx/implicator.go index 1ecfe18daa53..6584dd156436 100644 --- a/pkg/sql/opt/partialidx/implicator.go +++ b/pkg/sql/opt/partialidx/implicator.go @@ -43,18 +43,18 @@ import ( // The logic is as follows, where "=>" means "implies" and an "atom" is any // expression that is not a logical conjunction or disjunction. 
// -// atom A => atom B iff: B contains A -// atom A => AND-expr B iff: A => each of B's children -// atom A => OR-expr B iff: A => any of B's children +// atom A => atom B iff: B contains A +// atom A => AND-expr B iff: A => each of B's children +// atom A => OR-expr B iff: A => any of B's children // -// AND-expr A => atom B iff: any of A's children => B -// AND-expr A => AND-expr B iff: A => each of B's children -// AND-expr A => OR-expr B iff: A => any of B's children OR -// any of A's children => B +// AND-expr A => atom B iff: any of A's children => B +// AND-expr A => AND-expr B iff: A => each of B's children +// AND-expr A => OR-expr B iff: A => any of B's children OR +// any of A's children => B // -// OR-expr A => atom B iff: each of A's children => B -// OR-expr A => AND-expr B iff: A => each of B's children -// OR-expr A => OR-expr B iff: each of A's children => any of B's children +// OR-expr A => atom B iff: each of A's children => B +// OR-expr A => AND-expr B iff: A => each of B's children +// OR-expr A => OR-expr B iff: each of A's children => any of B's children // // II. Remaining Filters // @@ -71,29 +71,29 @@ import ( // We can safely remove an expression from the filters if all of the following // are true: // -// 1. The expression exactly matches an expression in the predicate. This -// prevents returning empty remaining filters for the implication below. The -// original filters must be applied on top of a partial index scan with the -// a > 0 predicate to filter out rows where a is between 0 and 10. +// 1. The expression exactly matches an expression in the predicate. This +// prevents returning empty remaining filters for the implication below. The +// original filters must be applied on top of a partial index scan with the +// a > 0 predicate to filter out rows where a is between 0 and 10. // // a > 10 // => // a > 0 // -// 2. The expression does not reside within a disjunction in the predicate. -// This prevents the function from returning empty remaining filters for the -// implication below. The original filters must be applied on top of a partial -// index scan with the predicate to filter out rows where a > 0 but -// b != 'foo'. +// 2. The expression does not reside within a disjunction in the predicate. +// This prevents the function from returning empty remaining filters for the +// implication below. The original filters must be applied on top of a partial +// index scan with the predicate to filter out rows where a > 0 but +// b != 'foo'. // // b = 'foo' // => // a > 0 OR b = 'foo' // -// 3. The expression does not reside within a disjunction in the filters. This -// prevents the function from incorrectly reducing the filters for the -// implication below. The original filters must be applied in this case to -// filter out rows where a is false and c is true, but b is false. +// 3. The expression does not reside within a disjunction in the filters. This +// prevents the function from incorrectly reducing the filters for the +// implication below. The original filters must be applied in this case to +// filter out rows where a is false and c is true, but b is false. // // a OR (b AND c) // => @@ -103,9 +103,9 @@ import ( // the remaining filters in some cases in which it is theoretically possible to // simplify the filters. For example, consider the implication below. // -// a OR b -// => -// b OR a +// a OR b +// => +// b OR a // // In this case, the remaining filters could be empty, but they are not, because // of the asymmetry of the expressions. 
Individually a and b are exact matches @@ -430,9 +430,9 @@ func (im *Implicator) orExprImpliesPredicate(e *memo.OrExpr, pred opt.ScalarExpr // pred. It is similar to atomImpliesPredicate, except that it handles a special // case where e is an IN expression and pred is an OR expression, such as: // -// a IN (1, 2) -// => -// a = 1 OR a = 2 +// a IN (1, 2) +// => +// a = 1 OR a = 2 // // Bespoke logic for IN filters and OR predicates in this form is required // because the OrExpr must be considered as a single atomic unit in order to @@ -789,12 +789,11 @@ func (im *Implicator) simplifyScalarExpr(e opt.ScalarExpr, exactMatches exprSet) // // For example, the input: // -// a OR (b AND c) OR (d OR e) +// a OR (b AND c) OR (d OR e) // // Results in: // -// [a, (b AND c), d, e] -// +// [a, (b AND c), d, e] func flattenOrExpr(or *memo.OrExpr) []opt.ScalarExpr { ors := make([]opt.ScalarExpr, 0, 2) diff --git a/pkg/sql/opt/partialidx/implicator_test.go b/pkg/sql/opt/partialidx/implicator_test.go index baa119d2b9ca..a0d1199790e6 100644 --- a/pkg/sql/opt/partialidx/implicator_test.go +++ b/pkg/sql/opt/partialidx/implicator_test.go @@ -38,18 +38,17 @@ import ( // // - predtest vars=(var1 type1, var2 type2, ...)" // -// The vars argument sets the names and types of the variables in the -// expressions. +// The vars argument sets the names and types of the variables in the +// expressions. // -// The test input must be in the format: +// The test input must be in the format: // -// [filter expression] -// => -// [predicate expression] -// -// The "=>" symbol denotes implication. For example, "a => b" tests if -// expression a implies expression b. +// [filter expression] +// => +// [predicate expression] // +// The "=>" symbol denotes implication. For example, "a => b" tests if +// expression a implies expression b. func TestImplicator(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/opt/partition/locality_test.go b/pkg/sql/opt/partition/locality_test.go index 72119e0fe50e..d5ecd98fa827 100644 --- a/pkg/sql/opt/partition/locality_test.go +++ b/pkg/sql/opt/partition/locality_test.go @@ -133,7 +133,8 @@ func TestPrefixSorter(t *testing.T) { // parsePartitionKeys parses a PARTITION BY LIST representation with integer // values like: -// "[/1] [/1/2] [/1/3] [/1/3/5]" +// +// "[/1] [/1/2] [/1/3] [/1/3/5]" func parsePartitionKeys(evalCtx *eval.Context, str string) []tree.Datums { if str == "" { return []tree.Datums{} diff --git a/pkg/sql/opt/props/col_stats_map.go b/pkg/sql/opt/props/col_stats_map.go index 105d7dea604a..3c38852dc1a7 100644 --- a/pkg/sql/opt/props/col_stats_map.go +++ b/pkg/sql/opt/props/col_stats_map.go @@ -50,19 +50,19 @@ type colStatVal struct { // value plus a prefix id that uniquely identifies the set of smaller values. // For example, if an opt.ColSet contains (2, 3, 6), then its index looks like: // -// (prefix: 0, id: 2) => (prefix: 1, pos: -1) -// └── (prefix: 1, id: 3) => (prefix: 2, pos: -1) -// └── (prefix: 2, id: 6) => (prefix: 3, pos: 0) +// (prefix: 0, id: 2) => (prefix: 1, pos: -1) +// └── (prefix: 1, id: 3) => (prefix: 2, pos: -1) +// └── (prefix: 2, id: 6) => (prefix: 3, pos: 0) // // Where pos is the ordinal position of the statistic in ColStatsMap, and pos=-1 // signifies that there is not yet any statistic for that column set. 
If an // additional opt.ColSet containing (2, 4) is added to the index, then it shares // the initial lookup node, but then diverges: // -// (prefix: 0, id: 2) => (prefix: 1, pos: -1) -// ├── (prefix: 1, id: 3) => (prefix: 2, pos: -1) -// │ └── (prefix: 2, id: 6) => (prefix: 3, pos: 0) -// └── (prefix: 1, id: 4) => (prefix: 4, pos: 1) +// (prefix: 0, id: 2) => (prefix: 1, pos: -1) +// ├── (prefix: 1, id: 3) => (prefix: 2, pos: -1) +// │ └── (prefix: 2, id: 6) => (prefix: 3, pos: 0) +// └── (prefix: 1, id: 4) => (prefix: 4, pos: 1) // // This algorithm can be implemented by a single Go map that uses efficient // int64 keys and values. It requires O(N) accesses to add and find a column @@ -97,9 +97,10 @@ func (m *ColStatsMap) Count() int { // Get returns the nth statistic in the map, by its ordinal position. This // position is stable across calls to Get or Add (but not RemoveIntersecting). // NOTE: The returned *ColumnStatistic is only valid until this ColStatsMap is -// updated via a call to Add() or RemoveIntersecting(). At that point, -// the address of the statistic may have changed, so it must be fetched -// again using another call to Get() or Lookup(). +// +// updated via a call to Add() or RemoveIntersecting(). At that point, +// the address of the statistic may have changed, so it must be fetched +// again using another call to Get() or Lookup(). func (m *ColStatsMap) Get(nth int) *ColumnStatistic { if nth < initialColStatsCap { return &m.initial[nth] @@ -110,9 +111,10 @@ func (m *ColStatsMap) Get(nth int) *ColumnStatistic { // Lookup returns the column statistic indexed by the given column set. If no // such statistic exists in the map, then ok=false. // NOTE: The returned *ColumnStatistic is only valid until this ColStatsMap is -// updated via a call to Add() or RemoveIntersecting(). At that point, -// the address of the statistic may have changed, so it must be fetched -// again using another call to Lookup() or Get(). +// +// updated via a call to Add() or RemoveIntersecting(). At that point, +// the address of the statistic may have changed, so it must be fetched +// again using another call to Lookup() or Get(). func (m *ColStatsMap) Lookup(cols opt.ColSet) (colStat *ColumnStatistic, ok bool) { // Scan the inlined statistics if there are only a few statistics in the map. if m.count <= initialColStatsCap { @@ -157,9 +159,10 @@ func (m *ColStatsMap) Lookup(cols opt.ColSet) (colStat *ColumnStatistic, ok bool // and returns it, along with added=true. Otherwise, Add returns the existing // ColumnStatistic with added=false. // NOTE: The returned *ColumnStatistic is only valid until this ColStatsMap is -// updated via another call to Add() or RemoveIntersecting(). At that -// point, the address of the statistic may have changed, so it must be -// fetched again using Lookup() or Get(). +// +// updated via another call to Add() or RemoveIntersecting(). At that +// point, the address of the statistic may have changed, so it must be +// fetched again using Lookup() or Get(). func (m *ColStatsMap) Add(cols opt.ColSet) (_ *ColumnStatistic, added bool) { // Only add column set if it is not already present in the map. colStat, ok := m.Lookup(cols) diff --git a/pkg/sql/opt/props/func_dep.go b/pkg/sql/opt/props/func_dep.go index 144d172866b2..983a27f50b7d 100644 --- a/pkg/sql/opt/props/func_dep.go +++ b/pkg/sql/opt/props/func_dep.go @@ -26,12 +26,12 @@ import ( // then those two rows will also have equal values for columns in B. 
For // example, where columns (a1, a2) are in set A, and column (b1) is in set B: // -// a1 a2 b1 -// -------- -// 1 2 5 -// 1 2 5 -// 3 4 6 -// 3 4 6 +// a1 a2 b1 +// -------- +// 1 2 5 +// 1 2 5 +// 3 4 6 +// 3 4 6 // // The left side of a functional dependency is called the "determinant", and // the right side is called the "dependant". Each side can contain zero or more @@ -42,12 +42,12 @@ import ( // When a dependant contains multiple columns, it is equivalent to splitting // the single FD into multiple FDs, each with a single column dependant: // -// (a)-->(b,c) +// (a)-->(b,c) // // is equivalent to these two FDs: // -// (a)-->(b) -// (a)-->(c) +// (a)-->(b) +// (a)-->(c) // // When a determinant contains zero columns, as in ()-->A, then A is fully // determined without reference to any other columns. An equivalent statement is @@ -55,20 +55,20 @@ import ( // And both of these statements are just another way of saying that columns in A // are constant: // -// a1 a2 b1 c1 -// ---------------- -// 1 NULL 3 3 -// 1 NULL 3 NULL -// 1 NULL 4 NULL +// a1 a2 b1 c1 +// ---------------- +// 1 NULL 3 3 +// 1 NULL 3 NULL +// 1 NULL 4 NULL // // When a determinant contains multiple columns, then the functional dependency // holds for the *composite* value of those columns. For example: // -// a1 a2 b1 -// -------- -// 1 2 5 -// 1 2 5 -// 1 3 4 +// a1 a2 b1 +// -------- +// 1 2 5 +// 1 2 5 +// 1 3 4 // // These are valid values, even though a1 has the same values for all three // rows, because it is only the combination of (a1,a2) that determines (b1). @@ -78,9 +78,9 @@ import ( // columns that are functionally dependent on those columns, either directly or // indirectly. Consider this set of FD's: // -// (a)-->(b,c,d) -// (b,c,e)-->(f) -// (d)-->(e) +// (a)-->(b,c,d) +// (b,c,e)-->(f) +// (d)-->(e) // // The transitive closure of (a) is (a,b,c,d,e,f). To start, (a) determines // (b,c,d). From there, (d) transitively determines (e). And now that (b,c,e) @@ -89,25 +89,25 @@ import ( // duplicates, since all other columns will be equal. And if there are no // duplicate rows, then (a) is a key for the relation. // -// Deriving FD Sets +// # Deriving FD Sets // // Base table primary keys can be trivially mapped into an FD set, since the // primary key always uniquely determines the other columns: // -// CREATE TABLE t (a INT PRIMARY KEY, b INT, c INT) -// (a)-->(b,c) +// CREATE TABLE t (a INT PRIMARY KEY, b INT, c INT) +// (a)-->(b,c) // // Each SQL relational operator derives its own FD set from the FD sets of its // inputs. For example, the Select operator augments the FD set of its input, // based on its filter condition: // -// SELECT * FROM t WHERE a=1 +// SELECT * FROM t WHERE a=1 // // Equating a column to a constant value constructs a new FD with an empty // determinant, so that the augmented FD set becomes: // -// (a)-->(b,c) -// ()-->(a) +// (a)-->(b,c) +// ()-->(a) // // Since the value of column "a" is always the same, and since "a" functionally // determines "b" and "c", the values of all columns are constants. Furthermore, @@ -118,14 +118,14 @@ import ( // including eliminating unnecessary DISTINCT operators, simplifying ORDER BY // columns, removing Max1Row operators, and mapping semi-joins to inner-joins. // -// NULL Values +// # NULL Values // // FDs become more complex when the possibility of NULL values is introduced. // SQL semantics often treat a NULL value as an "unknown" value that is not // equal to any other value, including another NULL value. 
For example, SQL // unique indexes exhibit this behavior: // -// CREATE TABLE t (a INT PRIMARY KEY, b INT, c INT, UNIQUE (b)) +// CREATE TABLE t (a INT PRIMARY KEY, b INT, c INT, UNIQUE (b)) // // Here, "b" column values are unique...except for the case of multiple NULL // values, which are allowed because each NULL is treated as if it was a @@ -139,53 +139,53 @@ import ( // "are these two columns equal". The semantics are identical to what this SQL // expression returns: // -// ((c1 = c2) OR (c1 IS NULL AND c2 IS NULL)) IS True +// ((c1 = c2) OR (c1 IS NULL AND c2 IS NULL)) IS True // // And here are some examples: // -// c1 c2 NULL= -// ----------------- -// 1 1 true -// NULL NULL true -// 1 2 false -// 1 NULL false -// NULL 1 false +// c1 c2 NULL= +// ----------------- +// 1 1 true +// NULL NULL true +// 1 2 false +// 1 NULL false +// NULL 1 false // // So now for the definition of A-->B that incorporates NULL values: // -// for any two rows r1 and r2 in the relation: -// A(r1) NULL= A(r2) ==> B(r1) NULL= B(r2) +// for any two rows r1 and r2 in the relation: +// A(r1) NULL= A(r2) ==> B(r1) NULL= B(r2) // // Intuitively, if two different rows have equal values for A using "NULLs are // equal" semantics, then those rows will also have equal values for B using // those same semantics. As an example, the following sets of rows would be // valid for the dependency (b)-->(c): // -// b c -// ---------- -// 1 NULL -// 1 NULL -// NULL 1 -// NULL 1 -// 2 3 -// 2 3 +// b c +// ---------- +// 1 NULL +// 1 NULL +// NULL 1 +// NULL 1 +// 2 3 +// 2 3 // -// b c -// ---------- -// NULL NULL -// NULL NULL +// b c +// ---------- +// NULL NULL +// NULL NULL // // but these sets of rows would be invalid: // -// b c -// ---------- -// NULL 1 -// NULL NULL +// b c +// ---------- +// NULL 1 +// NULL NULL // -// b c -// ---------- -// NULL 1 -// NULL 2 +// b c +// ---------- +// NULL 1 +// NULL 2 // // Unique constraints allow the latter cases, however, and therefore it is // desirable to somehow encode these weaker dependencies as FDs, because they @@ -197,19 +197,19 @@ import ( // false, then the FD is a "lax" dependency. Lax dependencies use "squiggly" // arrow notation to differentiate them from the strict variant: // -// A~~>B +// A~~>B // // In contrast to strict dependencies, lax dependencies treat NULLs on // determinant columns as distinct from one another, with equality semantics // identical to this SQL expression: // -// (c1 = c2) IS True +// (c1 = c2) IS True // // In other words, if either c1 or c2 is NULL, or both are NULL, then c1 is // considered not equal to c2. The definition for A~~>B follows from that: // -// for any two rows r1 and r2 in the relation: -// (A(r1) = A(r2)) IS True ==> B(r1) NULL= B(r2) +// for any two rows r1 and r2 in the relation: +// (A(r1) = A(r2)) IS True ==> B(r1) NULL= B(r2) // // In other words, if two different non-NULL rows have equal values for A, then // those rows will also have equal values for B using NULL= semantics. Note that @@ -217,21 +217,21 @@ import ( // the columns of A are not-NULL. 
The example row sets shown above that were // invalid for a strict dependency are valid for a lax dependency: // -// b c -// ---------- -// NULL 1 -// NULL NULL +// b c +// ---------- +// NULL 1 +// NULL NULL // -// b c -// ---------- -// NULL 1 -// NULL 2 +// b c +// ---------- +// NULL 1 +// NULL 2 // // To continue the CREATE TABLE example shown above, another FD can now be // derived from that statement, in addition to the primary key FD: // -// (a)-->(b,c) -// (b)~~>(a,c) +// (a)-->(b,c) +// (b)~~>(a,c) // // Lax dependencies are *not* transitive, and have limited usefulness as-is. // However, some operators (like Select) can "reject" NULL values, which means @@ -240,13 +240,13 @@ import ( // dependency (recall that the both have identical semantics when NULLs are not // present), as in this example: // -// SELECT * FROM t WHERE b>5 +// SELECT * FROM t WHERE b>5 // // The ">" operator rejects NULL values, which means that the Select operator // can convert the lax dependency to a strict dependency: // -// (a)-->(b,c) -// (b)-->(a,c) +// (a)-->(b,c) +// (b)-->(a,c) // // Now, either the "a" or "b" column determines the values of all other columns, // and both are keys for the relation. @@ -254,8 +254,8 @@ import ( // Another thing to note is that a lax dependency with an empty determinant is // the same as the corresponding strict dependency: // -// ()~~>(a,b) -// ()-->(a,b) +// ()~~>(a,b) +// ()-->(a,b) // // As described above, a strict dependency differs from a lax dependency only in // terms of what values are allowed in determinant columns. Since the @@ -263,18 +263,18 @@ import ( // For that reason, this library automatically maps lax constant dependencies to // strict constant dependencies. // -// Keys +// # Keys // // A key is a set of columns that have a unique composite value for every row in // the relation. There are two kinds of keys, strict and lax, that parallel the // two kinds of functional dependencies. Strict keys treat NULL values in key // columns as equal to one another: // -// b c -// -------- -// 1 10 -// 2 20 -// NULL 30 +// b c +// -------- +// 1 10 +// 2 20 +// NULL 30 // // Here, "b" is a key for the relation, even though it contains a NULL value, // because there is only one such value. Multiple NULL values would violate the @@ -286,12 +286,12 @@ import ( // By contrast, lax keys treat NULL values in key columns as distinct from one // another, and so considers column "b" as unique in the following example: // -// b c -// -------- -// 1 10 -// 2 20 -// NULL 30 -// NULL 40 +// b c +// -------- +// 1 10 +// 2 20 +// NULL 30 +// NULL 40 // // Note that both strict and lax keys treat non-NULL values identically; values // from two different rows must never compare equal to one another. In addition, @@ -299,8 +299,8 @@ import ( // with the key as determinant and all other columns in the relation as // dependants. Here is an example assuming a table with columns (a,b,c,d): // -// lax-key(a,b) => (a,b)~~>(c,d) -// strict-key(a,b) => (a,b)-->(c,d) +// lax-key(a,b) => (a,b)~~>(c,d) +// strict-key(a,b) => (a,b)-->(c,d) // // The "empty key" is a special key that has zero columns. It is used when the // relation is guaranteed to have at most one row. In this special case, every @@ -344,23 +344,23 @@ import ( // relatively short key is needed (e.g. during decorrelation), FuncDepSet has // one ready to go. 
// -// Equivalent Columns +// # Equivalent Columns // // FD sets encode "equivalent columns", which are pairs of columns that always // have equal values using the SQL equality operator with NULL= semantics. Two // columns a and b are equivalent if the following expression returns true: // -// ((a = b) OR (a IS NULL AND b IS NULL)) IS True +// ((a = b) OR (a IS NULL AND b IS NULL)) IS True // // Equivalent columns are typically derived from a Select filter condition, and // are represented as two FDs with each column acting as both determinant and // dependant: // -// SELECT * FROM t WHERE b=c -// (a)-->(b,c) -// (b)~~>(a,c) -// (b)==(c) -// (c)==(b) +// SELECT * FROM t WHERE b=c +// (a)-->(b,c) +// (b)~~>(a,c) +// (b)==(c) +// (c)==(b) // // In the common case shown above, the WHERE clause rejects NULL values, so the // equivalency will always be strict, which means it retains all the same @@ -368,56 +368,55 @@ import ( // possible, the library currently maps them into regular lax dependencies to // simplify implementation. // -// Theory to Practice +// # Theory to Practice // // For a more rigorous examination of functional dependencies and their // interaction with various SQL operators, see the following Master's Thesis: // -// Norman Paulley, Glenn. (2000). -// Exploiting Functional Dependence in Query Optimization. -// https://cs.uwaterloo.ca/research/tr/2000/11/CS-2000-11.thesis.pdf +// Norman Paulley, Glenn. (2000). +// Exploiting Functional Dependence in Query Optimization. +// https://cs.uwaterloo.ca/research/tr/2000/11/CS-2000-11.thesis.pdf // // While this paper served as the inspiration for this library, a number of // details differ, including (but not limited to): // -// 1. Most importantly, the definition of "lax" used in the paper differs from -// the definition used by this library. For a lax dependency A~~>B, the -// paper allows this set of rows: -// -// a b -// ------- -// 1 1 -// 1 NULL -// -// This library disallows that, since it requires that if the determinant -// of a lax dependency is not-null, then it is equivalent to a strict -// dependency. This alternate definition is briefly covered in section -// 2.5.3.2 of the paper (see definition 2.19). The reason for this change -// is to allow a lax dependency to be upgraded to a strict dependency more -// readily, needing only the determinant columns to be not-null rather than -// both determinant and dependant columns. -// -// 2. The paper simplifies FD sets so that dependants never contain more than -// one column. This library allows multiple dependent columns, since they -// can be so efficiently stored and processed as ColSets. -// -// 3. The paper deliberately avoids all simplifications when a SQL operator -// adds new FDs to an existing FD set, in order to avoid unneeded work and -// expensive reductions. This library performs quite a few simplifications -// in order to keep the FD set more manageable and understandable. -// -// 4. The paper "colors" columns black when they are no longer part of a -// derived relation. Rather than marking removed columns, this library -// actually removes them from the FD set. -// -// 5. In order to ensure a unique key for every relation, the paper uses a -// special "tuple identifier" that acts like a virtual column and can be -// both a determinant and a dependant. If the transitive closure of any set -// of columns includes the tuple identifier column, then that set of -// columns is a super key for the relation. 
As described in the Keys -// section above, this library takes a simplified approach so that it -// doesn't need to allocate virtual columns in property derivation code. -// +// 1. Most importantly, the definition of "lax" used in the paper differs from +// the definition used by this library. For a lax dependency A~~>B, the +// paper allows this set of rows: +// +// a b +// ------- +// 1 1 +// 1 NULL +// +// This library disallows that, since it requires that if the determinant +// of a lax dependency is not-null, then it is equivalent to a strict +// dependency. This alternate definition is briefly covered in section +// 2.5.3.2 of the paper (see definition 2.19). The reason for this change +// is to allow a lax dependency to be upgraded to a strict dependency more +// readily, needing only the determinant columns to be not-null rather than +// both determinant and dependant columns. +// +// 2. The paper simplifies FD sets so that dependants never contain more than +// one column. This library allows multiple dependent columns, since they +// can be so efficiently stored and processed as ColSets. +// +// 3. The paper deliberately avoids all simplifications when a SQL operator +// adds new FDs to an existing FD set, in order to avoid unneeded work and +// expensive reductions. This library performs quite a few simplifications +// in order to keep the FD set more manageable and understandable. +// +// 4. The paper "colors" columns black when they are no longer part of a +// derived relation. Rather than marking removed columns, this library +// actually removes them from the FD set. +// +// 5. In order to ensure a unique key for every relation, the paper uses a +// special "tuple identifier" that acts like a virtual column and can be +// both a determinant and a dependant. If the transitive closure of any set +// of columns includes the tuple identifier column, then that set of +// columns is a super key for the relation. As described in the Keys +// section above, this library takes a simplified approach so that it +// doesn't need to allocate virtual columns in property derivation code. type FuncDepSet struct { // deps contains the functional dependencies that have a non-trivial // determinant and dependant (i.e. 
not empty, with no overlapping columns): @@ -576,13 +575,12 @@ func (f *FuncDepSet) RemapFrom(fdset *FuncDepSet, fromCols, toCols opt.ColList) // (a,b) is a strict key for the following relation, but (a) is not (because // there are multiple rows where a=1 and a=NULL): // -// a b c -// ---------------- -// NULL NULL NULL -// NULL 1 1 -// 1 NULL 1 -// 1 1 1 -// +// a b c +// ---------------- +// NULL NULL NULL +// NULL 1 1 +// 1 NULL 1 +// 1 1 1 func (f *FuncDepSet) ColsAreStrictKey(cols opt.ColSet) bool { return f.colsAreKey(cols, strictKey) } @@ -594,17 +592,16 @@ func (f *FuncDepSet) ColsAreStrictKey(cols opt.ColSet) bool { // following relation, but (a) is not (because there are multiple rows where // a=1): // -// a b c -// ---------------- -// NULL NULL NULL -// NULL NULL 1 -// NULL NULL 2 -// NULL 1 1 -// NULL 1 2 -// 1 NULL 1 -// 1 NULL 2 -// 1 1 1 -// +// a b c +// ---------------- +// NULL NULL NULL +// NULL NULL 1 +// NULL NULL 2 +// NULL 1 1 +// NULL 1 2 +// 1 NULL 1 +// 1 NULL 2 +// 1 1 1 func (f *FuncDepSet) ColsAreLaxKey(cols opt.ColSet) bool { return f.colsAreKey(cols, laxKey) } @@ -653,9 +650,9 @@ func (f *FuncDepSet) InClosureOf(cols, in opt.ColSet) bool { // includes the input columns plus all columns that are functionally dependent // on those columns, either directly or indirectly. Consider this set of FD's: // -// (a)-->(b,c,d) -// (b,c,e)-->(f) -// (d)-->(e) +// (a)-->(b,c,d) +// (b,c,e)-->(f) +// (d)-->(e) // // The strict closure of (a) is (a,b,c,d,e,f), because (a) determines all other // columns. Therefore, if two rows have the same value for (a), then the rows @@ -697,10 +694,10 @@ func (f *FuncDepSet) AreColsEquiv(col1, col2 opt.ColumnID) bool { // closure includes the input columns plus all columns that are equivalent to // any of these columns, either directly or indirectly. For example: // -// (a)==(b) -// (b)==(c) -// (a)==(d) -// (e)==(f) +// (a)==(b) +// (b)==(c) +// (a)==(d) +// (e)==(f) // // The equivalence closure for (a,e) is (a,b,c,d,e,f) because all these columns // are transitively equal to either a or e. Therefore, all columns must have @@ -725,7 +722,7 @@ func (f *FuncDepSet) ComputeEquivClosure(cols opt.ColSet) opt.ColSet { // same set of values in the rest of the relation's columns. For key columns // (a,b) and relation columns (a,b,c,d), an FD like this is created: // -// (a,b)-->(c,d) +// (a,b)-->(c,d) // // If the resulting candidate key has fewer columns than the current key, then // the new key is adopted in its place. @@ -752,8 +749,7 @@ func (f *FuncDepSet) AddStrictKey(keyCols, allCols opt.ColSet) { // have the same values in other non-key columns. For key columns (a,b) and // relation columns (a,b,c,d), and FD like this is created: // -// (a,b)~~>(c,d) -// +// (a,b)~~>(c,d) func (f *FuncDepSet) AddLaxKey(keyCols, allCols opt.ColSet) { if !keyCols.SubsetOf(allCols) { panic(errors.AssertionFailedf("allCols does not include keyCols")) @@ -784,17 +780,16 @@ func (f *FuncDepSet) AddLaxKey(keyCols, allCols opt.ColSet) { // optimization. For a relation with columns (a, b), the following FD is // created in the set: // -// ()-->(a,b) +// ()-->(a,b) // // If f has equivalence dependencies of columns that are a subset of cols, those // dependencies are retained in f. This prevents losing additional information // about the columns, which a single FD with an empty key cannot describe. 
For // example: // -// f: (a)-->(b,c), (a)==(b), (b)==(a), (a)==(c), (c)==(a) -// cols: (a,c) -// result: ()-->(a,c), (a)==(c), (c)==(a) -// +// f: (a)-->(b,c), (a)==(b), (b)==(a), (a)==(c), (c)==(a) +// cols: (a,c) +// result: ()-->(a,c), (a)==(c), (c)==(a) func (f *FuncDepSet) MakeMax1Row(cols opt.ColSet) { // Remove all FDs except for equivalency FDs with columns that are a subset // of cols. @@ -877,9 +872,8 @@ func (f *FuncDepSet) MakeNotNull(notNullCols opt.ColSet) { // semantics, or else "a" is NULL and "b" is NULL. The following FDs are // created in the set: // -// (a)==(b) -// (b)==(a) -// +// (a)==(b) +// (b)==(a) func (f *FuncDepSet) AddEquivalency(a, b opt.ColumnID) { if a == b { return @@ -896,7 +890,7 @@ func (f *FuncDepSet) AddEquivalency(a, b opt.ColumnID) { // its value may be NULL, but then the column must be NULL for all rows. For // column "a", the FD looks like this: // -// ()-->(a) +// ()-->(a) // // Since it is a constant, any set of determinant columns (including the empty // set) trivially determines the value of "a". @@ -958,12 +952,11 @@ func (f *FuncDepSet) AddConstants(cols opt.ColSet) { // column in a projection list. The synthesized column is often derived from // other columns, in which case AddSynthesizedCol creates a new FD like this: // -// (a,b)-->(c) +// (a,b)-->(c) // // Or it may be a constant column, like this: // -// ()-->(c) -// +// ()-->(c) func (f *FuncDepSet) AddSynthesizedCol(from opt.ColSet, col opt.ColumnID) { if from.Contains(col) { panic(errors.AssertionFailedf("synthesized column cannot depend upon itself")) @@ -1184,26 +1177,25 @@ func (f *FuncDepSet) MakeProduct(inner *FuncDepSet) { // longer hold and some other dependencies need to be augmented in order to be // valid for the apply join operator. Consider this example: // -// SELECT * -// FROM a -// INNER JOIN LATERAL (SELECT * FROM b WHERE b.y=a.y) -// ON True -// -// 1. The constant dependency created from the outer column reference b.y=a.y -// does not hold for the Apply operator, since b.y is no longer constant at -// this level. In general, constant dependencies cannot be retained, because -// they may have been generated from outer column equivalencies. -// 2. If a strict dependency (b.x,b.y)-->(b.z) held, it would have been reduced -// to (b.x)-->(b.z) because (b.y) is constant in the inner query. However, -// (b.x)-->(b.z) does not hold for the Apply operator, since (b.y) is not -// constant in that case. However, the dependency *does* hold as long as its -// determinant is augmented by the left input's key columns (if key exists). -// 3. Lax dependencies follow the same rules as #2. -// 4. Equivalence dependencies in the inner query still hold for the Apply -// operator. -// 5. If both the outer and inner inputs of the apply join have keys, then the -// concatenation of those keys is a key on the apply join result. -// +// SELECT * +// FROM a +// INNER JOIN LATERAL (SELECT * FROM b WHERE b.y=a.y) +// ON True +// +// 1. The constant dependency created from the outer column reference b.y=a.y +// does not hold for the Apply operator, since b.y is no longer constant at +// this level. In general, constant dependencies cannot be retained, because +// they may have been generated from outer column equivalencies. +// 2. If a strict dependency (b.x,b.y)-->(b.z) held, it would have been reduced +// to (b.x)-->(b.z) because (b.y) is constant in the inner query. However, +// (b.x)-->(b.z) does not hold for the Apply operator, since (b.y) is not +// constant in that case. 
However, the dependency *does* hold as long as its +// determinant is augmented by the left input's key columns (if key exists). +// 3. Lax dependencies follow the same rules as #2. +// 4. Equivalence dependencies in the inner query still hold for the Apply +// operator. +// 5. If both the outer and inner inputs of the apply join have keys, then the +// concatenation of those keys is a key on the apply join result. func (f *FuncDepSet) MakeApply(inner *FuncDepSet) { for i := range inner.deps { fd := &inner.deps[i] @@ -1581,16 +1573,15 @@ func (f *FuncDepSet) ensureKeyClosure(cols opt.ColSet) { // Verify runs consistency checks against the FD set, in order to ensure that it // conforms to several invariants: // -// 1. An FD determinant should not intersect its dependants. -// 2. If a constant FD is present, it's the first FD in the set. -// 3. A constant FD must be strict. -// 4. Lax equivalencies should be reduced to lax dependencies. -// 5. Equivalence determinant should be exactly one column. -// 6. The dependants of an equivalence is always its closure. -// 7. If FD set has a key, it should be a candidate key (already reduced). -// 8. Closure of key should include all known columns in the FD set. -// 9. If FD set has no key then key columns should be empty. -// +// 1. An FD determinant should not intersect its dependants. +// 2. If a constant FD is present, it's the first FD in the set. +// 3. A constant FD must be strict. +// 4. Lax equivalencies should be reduced to lax dependencies. +// 5. Equivalence determinant should be exactly one column. +// 6. The dependants of an equivalence is always its closure. +// 7. If FD set has a key, it should be a candidate key (already reduced). +// 8. Closure of key should include all known columns in the FD set. +// 9. If FD set has no key then key columns should be empty. func (f *FuncDepSet) Verify() { for i := range f.deps { fd := &f.deps[i] diff --git a/pkg/sql/opt/props/func_dep_rand_test.go b/pkg/sql/opt/props/func_dep_rand_test.go index 8fb8f4f76651..b3bc5ec71357 100644 --- a/pkg/sql/opt/props/func_dep_rand_test.go +++ b/pkg/sql/opt/props/func_dep_rand_test.go @@ -109,11 +109,10 @@ type testRelation []testRow // String prints out the test relation in the following format: // -// 1 2 3 -// ------------- -// NULL 1 2 -// 3 NULL 4 -// +// 1 2 3 +// ------------- +// NULL 1 2 +// 3 NULL 4 func (tr testRelation) String() string { if len(tr) == 0 { return " \n" @@ -249,9 +248,9 @@ func (tr testRelation) notNullCols(numCols int) opt.ColSet { // joinTestRelations creates a possible result of joining two testRelations, // specifically: -// - an inner join if both leftOuter and rightOuter are false; -// - a left/right outer join if one of them is true; -// - a full outer join if both are true. +// - an inner join if both leftOuter and rightOuter are false; +// - a left/right outer join if one of them is true; +// - a full outer join if both are true. func joinTestRelations( numLeftCols int, left testRelation, @@ -720,14 +719,14 @@ func (ts *testState) format(b *strings.Builder) { // String describes the chain of operations and corresponding FDs. 
// For example: -// initial numCols=3 valRange=3 -// => MakeNotNull(2) -// FDs: -// => AddConstants(1,3) values {NULL,1} -// FDs: ()-->(1,3) -// => AddLaxKey(3) -// FDs: ()-->(1-3) // +// initial numCols=3 valRange=3 +// => MakeNotNull(2) +// FDs: +// => AddConstants(1,3) values {NULL,1} +// FDs: ()-->(1,3) +// => AddLaxKey(3) +// FDs: ()-->(1-3) func (ts *testState) String() string { var b strings.Builder ts.format(&b) @@ -781,7 +780,6 @@ func (ts *testState) child(t *testing.T, op testOp) *testState { // To reuse work, instead of generating one chain of operations at a time, we // generate a tree of operations; each path from root to a leaf is a chain that // is getting tested. -// func TestFuncDepOpsRandom(t *testing.T) { type testParams struct { testConfig diff --git a/pkg/sql/opt/props/func_dep_test.go b/pkg/sql/opt/props/func_dep_test.go index e162f8c3d729..d074a364596a 100644 --- a/pkg/sql/opt/props/func_dep_test.go +++ b/pkg/sql/opt/props/func_dep_test.go @@ -1263,8 +1263,9 @@ func TestFuncDeps_RemapFrom(t *testing.T) { } // Construct base table FD from figure 3.3, page 114: -// CREATE TABLE abcde (a INT PRIMARY KEY, b INT, c INT, d INT, e INT) -// CREATE UNIQUE INDEX ON abcde (b, c) +// +// CREATE TABLE abcde (a INT PRIMARY KEY, b INT, c INT, d INT, e INT) +// CREATE UNIQUE INDEX ON abcde (b, c) func makeAbcdeFD(t *testing.T) *props.FuncDepSet { // Set Key to all cols to start, and ensure it's overridden in AddStrictKey. allCols := c(1, 2, 3, 4, 5) @@ -1283,7 +1284,8 @@ func makeAbcdeFD(t *testing.T) *props.FuncDepSet { } // Construct base table FD from figure 3.3, page 114: -// CREATE TABLE mnpq (m INT, n INT, p INT, q INT, PRIMARY KEY (m, n)) +// +// CREATE TABLE mnpq (m INT, n INT, p INT, q INT, PRIMARY KEY (m, n)) func makeMnpqFD(t *testing.T) *props.FuncDepSet { allCols := c(10, 11, 12, 13) mnpq := &props.FuncDepSet{} @@ -1297,8 +1299,9 @@ func makeMnpqFD(t *testing.T) *props.FuncDepSet { } // Construct cartesian product FD from figure 3.6, page 122: -// CREATE TABLE mnpq (m INT, n INT, p INT, q INT, PRIMARY KEY (m, n)) -// SELECT * FROM abcde, mnpq +// +// CREATE TABLE mnpq (m INT, n INT, p INT, q INT, PRIMARY KEY (m, n)) +// SELECT * FROM abcde, mnpq func makeProductFD(t *testing.T) *props.FuncDepSet { product := makeAbcdeFD(t) product.MakeProduct(makeMnpqFD(t)) @@ -1312,7 +1315,8 @@ func makeProductFD(t *testing.T) *props.FuncDepSet { } // Construct inner join FD: -// SELECT * FROM abcde, mnpq WHERE a=m +// +// SELECT * FROM abcde, mnpq WHERE a=m func makeJoinFD(t *testing.T) *props.FuncDepSet { // Start with cartesian product FD and add equivalency to it. join := makeProductFD(t) diff --git a/pkg/sql/opt/props/histogram.go b/pkg/sql/opt/props/histogram.go index 13be0a07ecf0..7f18a53ea797 100644 --- a/pkg/sql/opt/props/histogram.go +++ b/pkg/sql/opt/props/histogram.go @@ -555,9 +555,10 @@ func makeSpanFromBucket(iter *histogramIter, prefix []tree.Datum) (span constrai // values are integers). // // The following spans will filter the bucket as shown: -// [/0 - /5] => {NumEq: 1, NumRange: 5, UpperBound: 5} -// [/2 - /10] => {NumEq: 5, NumRange: 8, UpperBound: 10} -// [/20 - /30] => error +// +// [/0 - /5] => {NumEq: 1, NumRange: 5, UpperBound: 5} +// [/2 - /10] => {NumEq: 5, NumRange: 8, UpperBound: 10} +// [/20 - /30] => error // // Note that the calculations for NumEq and NumRange depend on the data type. 
// For discrete data types such as integers and dates, it is always possible @@ -567,14 +568,13 @@ func makeSpanFromBucket(iter *histogramIter, prefix []tree.Datum) (span constrai // bound. For example, given the same bucket as in the above example, but with // floating point values instead of integers: // -// [/0 - /5] => {NumEq: 0, NumRange: 5, UpperBound: 5.0} -// [/2 - /10] => {NumEq: 5, NumRange: 8, UpperBound: 10.0} -// [/20 - /30] => error +// [/0 - /5] => {NumEq: 0, NumRange: 5, UpperBound: 5.0} +// [/2 - /10] => {NumEq: 5, NumRange: 8, UpperBound: 10.0} +// [/20 - /30] => error // // For non-numeric types such as strings, it is not possible to estimate // the size of NumRange if the bucket is cut off in the middle. In this case, // we use the heuristic that NumRange is reduced by half. -// func getFilteredBucket( iter *histogramIter, keyCtx *constraint.KeyContext, filteredSpan *constraint.Span, colOffset int, ) *cat.HistogramBucket { @@ -694,36 +694,35 @@ func getFilteredBucket( // below, where [\bear - \bobcat] represents the before range and // [\bluejay - \boar] represents the after range. // -// bear := [18 98 101 97 114 0 1 ] -// => [101 97 114 0 0 0 0 0 ] +// bear := [18 98 101 97 114 0 1 ] +// => [101 97 114 0 0 0 0 0 ] // -// bluejay := [18 98 108 117 101 106 97 121 0 1] -// => [108 117 101 106 97 121 0 0 ] +// bluejay := [18 98 108 117 101 106 97 121 0 1] +// => [108 117 101 106 97 121 0 0 ] // -// boar := [18 98 111 97 114 0 1 ] -// => [111 97 114 0 0 0 0 0 ] +// boar := [18 98 111 97 114 0 1 ] +// => [111 97 114 0 0 0 0 0 ] // -// bobcat := [18 98 111 98 99 97 116 0 1 ] -// => [111 98 99 97 116 0 0 0 ] +// bobcat := [18 98 111 98 99 97 116 0 1 ] +// => [111 98 99 97 116 0 0 0 ] // // We can now find the range before/after by finding the difference between // the lower and upper bounds: // -// rangeBefore := [111 98 99 97 116 0 1 0] - -// [101 97 114 0 1 0 0 0] +// rangeBefore := [111 98 99 97 116 0 1 0] - +// [101 97 114 0 1 0 0 0] // -// rangeAfter := [111 97 114 0 1 0 0 0] - -// [108 117 101 106 97 121 0 1] +// rangeAfter := [111 97 114 0 1 0 0 0] - +// [108 117 101 106 97 121 0 1] // // Subtracting the uint64 representations of the byte arrays, the resulting // rangeBefore and rangeAfter are: // -// rangeBefore := 8,026,086,756,136,779,776 - 7,305,245,414,897,221,632 -// := 720,841,341,239,558,100 -// -// rangeAfter := 8,025,821,355,276,500,992 - 7,815,264,235,947,622,400 -// := 210,557,119,328,878,600 +// rangeBefore := 8,026,086,756,136,779,776 - 7,305,245,414,897,221,632 +// := 720,841,341,239,558,100 // +// rangeAfter := 8,025,821,355,276,500,992 - 7,815,264,235,947,622,400 +// := 210,557,119,328,878,600 func getRangesBeforeAndAfter( beforeLowerBound, beforeUpperBound, afterLowerBound, afterUpperBound tree.Datum, swap bool, ) (rangeBefore, rangeAfter float64, ok bool) { @@ -932,11 +931,15 @@ func getFixedLenArr(byteArr []byte, ind, fixLen int) []byte { } // histogramWriter prints histograms with the following formatting: -// NumRange1 NumEq1 NumRange2 NumEq2 .... +// +// NumRange1 NumEq1 NumRange2 NumEq2 .... +// // <----------- UpperBound1 ----------- UpperBound2 .... // // For example: -// 0 1 90 10 0 20 +// +// 0 1 90 10 0 20 +// // <--- 0 ---- 100 --- 200 // // This describes a histogram with 3 buckets. 
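Looking back at getRangesBeforeAndAfter above: the core of the byte-array comparison can be shown with a short, hypothetical sketch that zero-pads whatever bytes remain after removing the shared prefix and reads them as a big-endian uint64. The result is only on the order of the rangeBefore value shown above, since the real code operates on the full encoded keys, including their terminator bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

// prefixToUint64 zero-pads the first eight bytes that remain after the shared
// prefix has been removed and interprets them as a big-endian uint64, so that
// a string range can be approximated numerically.
func prefixToUint64(b []byte) uint64 {
	var buf [8]byte
	copy(buf[:], b) // copies at most 8 bytes; shorter inputs stay zero-padded
	return binary.BigEndian.Uint64(buf[:])
}

func main() {
	// "bear" and "bobcat" share the prefix "b".
	lower := prefixToUint64([]byte("ear"))
	upper := prefixToUint64([]byte("obcat"))
	fmt.Println(upper - lower) // roughly the rangeBefore value shown above
}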
The first bucket contains 1 value diff --git a/pkg/sql/opt/props/multiplicity.go b/pkg/sql/opt/props/multiplicity.go index dc5c77053e15..1f69ae0c1939 100644 --- a/pkg/sql/opt/props/multiplicity.go +++ b/pkg/sql/opt/props/multiplicity.go @@ -51,9 +51,9 @@ const ( // inputs. Left and right input rows can be duplicated and/or filtered by the // join. As an example: // -// CREATE TABLE xy (x INT PRIMARY KEY, y INT); -// CREATE TABLE uv (u INT PRIMARY KEY, v INT); -// SELECT * FROM xy FULL JOIN uv ON x=u; +// CREATE TABLE xy (x INT PRIMARY KEY, y INT); +// CREATE TABLE uv (u INT PRIMARY KEY, v INT); +// SELECT * FROM xy FULL JOIN uv ON x=u; // // 1. Are rows from xy or uv being duplicated by the join? // 2. Are any rows being filtered from the join output? @@ -71,7 +71,7 @@ const ( // can be statically proven that no rows from the given input will be duplicated // or filtered respectively. As an example, take the following query: // -// SELECT * FROM xy INNER JOIN uv ON y = v; +// SELECT * FROM xy INNER JOIN uv ON y = v; // // At execution time, it may be that every row from xy will be included in the // join output exactly once. However, since this cannot be proven before diff --git a/pkg/sql/opt/props/ordering_choice.go b/pkg/sql/opt/props/ordering_choice.go index 1631f0803996..f4495a8c001f 100644 --- a/pkg/sql/opt/props/ordering_choice.go +++ b/pkg/sql/opt/props/ordering_choice.go @@ -28,11 +28,11 @@ import ( // these parts specify a simple pattern that can match one or more candidate // orderings. Here are some examples: // -// +1 ORDER BY a -// +1,-2 ORDER BY a,b DESC -// +(1|2) ORDER BY a | ORDER BY b -// +(1|2),+3 ORDER BY a,c | ORDER BY b, c -// -(3|4),+5 opt(1,2) ORDER BY c DESC,e | ORDER BY a,d DESC,b DESC,e | ... +// +1 ORDER BY a +// +1,-2 ORDER BY a,b DESC +// +(1|2) ORDER BY a | ORDER BY b +// +(1|2),+3 ORDER BY a,c | ORDER BY b, c +// -(3|4),+5 opt(1,2) ORDER BY c DESC,e | ORDER BY a,d DESC,b DESC,e | ... // // Each column in the ordering sequence forms the corresponding column of the // sort key, from most significant to least significant. Each column has a sort @@ -46,12 +46,12 @@ import ( // the group can be used to form the corresponding column in the sort key. The // equivalent group columns come from SQL expressions like: // -// a=b +// a=b // // The optional column set contains columns that can appear anywhere (or // nowhere) in the ordering. Optional columns come from SQL expressions like: // -// a=1 +// a=1 // // Another case for optional columns is when we are grouping along a set of // columns and only care about the intra-group ordering. @@ -100,9 +100,9 @@ var optRegex, ordColRegex *regexp.Regexp // ParseOrderingChoice parses the string representation of an OrderingChoice for // testing purposes. Here are some examples of the string format: // -// +1 -// -(1|2),+3 -// +(1|2),+3 opt(5,6) +// +1 +// -(1|2),+3 +// +(1|2),+3 opt(5,6) // // The input string is expected to be valid; ParseOrderingChoice will panic if // it is not. @@ -247,8 +247,7 @@ func (oc *OrderingChoice) ToOrdering() opt.Ordering { // ColSet returns the set of all non-optional columns that are part of this // instance. 
For example, (1,2,3) will be returned if the OrderingChoice is: // -// +1,(2|3) opt(4,5) -// +// +1,(2|3) opt(4,5) func (oc *OrderingChoice) ColSet() opt.ColSet { var cs opt.ColSet for i := range oc.Columns { @@ -264,30 +263,29 @@ func (oc *OrderingChoice) ColSet() opt.ColSet { // // Examples: // -// implies -// +1 implies (given set is prefix) -// +1 implies +1 -// +1,-2 implies +1 (given set is prefix) -// +1,-2 implies +1,-2 -// +1 implies +1 opt(2) (unused optional col is ignored) -// -2,+1 implies +1 opt(2) (optional col is ignored) -// +1 implies +(1|2) (subset of choice) -// +(1|2) implies +(1|2|3) (subset of choice) -// +(1|2),-4 implies +(1|2|3),-(4|5) -// +(1|2) opt(4) implies +(1|2|3) opt(4) -// +1,+2,+3 implies +(1|2),+3 (unused group columns become optional) -// -// !implies +1 -// +1 !implies -1 (direction mismatch) -// +1 !implies +1,-2 (prefix matching not commutative) -// +1 opt(2) !implies +1 (extra optional cols not allowed) -// +1 opt(2) !implies +1 opt(3) -// +(1|2) !implies -(1|2) (direction mismatch) -// +(1|2) !implies +(3|4) (no intersection) -// +(1|2) !implies +(2|3) (intersects, but not subset) -// +(1|2|3) !implies +(1|2) (subset of choice not commutative) -// +(1|2) !implies +1 opt(2) +// implies +// +1 implies (given set is prefix) +// +1 implies +1 +// +1,-2 implies +1 (given set is prefix) +// +1,-2 implies +1,-2 +// +1 implies +1 opt(2) (unused optional col is ignored) +// -2,+1 implies +1 opt(2) (optional col is ignored) +// +1 implies +(1|2) (subset of choice) +// +(1|2) implies +(1|2|3) (subset of choice) +// +(1|2),-4 implies +(1|2|3),-(4|5) +// +(1|2) opt(4) implies +(1|2|3) opt(4) +// +1,+2,+3 implies +(1|2),+3 (unused group columns become optional) // +// !implies +1 +// +1 !implies -1 (direction mismatch) +// +1 !implies +1,-2 (prefix matching not commutative) +// +1 opt(2) !implies +1 (extra optional cols not allowed) +// +1 opt(2) !implies +1 opt(3) +// +(1|2) !implies -(1|2) (direction mismatch) +// +(1|2) !implies +(3|4) (no intersection) +// +(1|2) !implies +(2|3) (intersects, but not subset) +// +(1|2|3) !implies +(1|2) (subset of choice not commutative) +// +(1|2) !implies +1 opt(2) func (oc *OrderingChoice) Implies(other *OrderingChoice) bool { if !oc.Optional.SubsetOf(other.Optional) { return false @@ -358,14 +356,16 @@ func (oc *OrderingChoice) Intersects(other *OrderingChoice) bool { // Intersection returns an OrderingChoice that Implies both ordering choices. // Can only be called if Intersects is true. Some examples: // -// +1 ∩ = +1 -// +1 ∩ +1,+2 = +1,+2 -// +1,+2 opt(3) ∩ +1,+3 = +1,+3,+2 +// +1 ∩ = +1 +// +1 ∩ +1,+2 = +1,+2 +// +1,+2 opt(3) ∩ +1,+3 = +1,+3,+2 // // In general, OrderingChoice is not expressive enough to represent the // intersection. In such cases, an OrderingChoice representing a subset of the // intersection is returned. For example, -// +1 opt(2) ∩ +2 opt(1) +// +// +1 opt(2) ∩ +2 opt(1) +// // can be either +1,+2 or +2,+1; only one of these is returned. Note that // the function may not be commutative in this case. In practice, such cases are // unlikely. @@ -451,16 +451,15 @@ func (oc *OrderingChoice) Intersection(other *OrderingChoice) OrderingChoice { // are non-intersecting. Instead, it returns the longest prefix of intersecting // columns. 
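A couple of rows from the implication table above, written as a compilable sketch. It assumes ParseOrderingChoice (documented earlier in this file) returns an OrderingChoice by value; the expected results follow the table and are noted in comments rather than asserted.

package props_test

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
)

func ExampleOrderingChoice_Implies() {
	ab := props.ParseOrderingChoice("+1,-2")
	a := props.ParseOrderingChoice("+1")

	fmt.Println(ab.Implies(&a)) // expected: true  (the given ordering is a prefix)
	fmt.Println(a.Implies(&ab)) // expected: false (prefix matching is not commutative)
}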
Some examples: // -// +1 common prefix = -// +1 common prefix +1,+2 = +1 -// +1,+2 opt(3) common prefix +1,+3 = +1,+3 +// +1 common prefix = +// +1 common prefix +1,+2 = +1 +// +1,+2 opt(3) common prefix +1,+3 = +1,+3 // // Note that CommonPrefix is asymmetric: optional columns of oc will be used to // match trailing columns of other, but the reverse is not true. For example: // -// +1 opt(2) common prefix +1,+2 = +1,+2 -// +1,+2 common prefix +1 opt(2) = +1 -// +// +1 opt(2) common prefix +1,+2 = +1,+2 +// +1,+2 common prefix +1 opt(2) = +1 func (oc *OrderingChoice) CommonPrefix(other *OrderingChoice) OrderingChoice { if oc.Any() || other.Any() { return OrderingChoice{} @@ -612,16 +611,15 @@ func (oc *OrderingChoice) SubsetOfCols(cs opt.ColSet) bool { // CanProjectCols is true if at least one column in each ordering column group is // part of the given column set. For example, if the OrderingChoice is: // -// +1,-(2|3) opt(4,5) +// +1,-(2|3) opt(4,5) // // then CanProjectCols will behave as follows for these input sets: // -// (1,2) => true -// (1,3) => true -// (1,2,4) => true -// (1) => false -// (3,4) => false -// +// (1,2) => true +// (1,3) => true +// (1,2,4) => true +// (1) => false +// (3,4) => false func (oc *OrderingChoice) CanProjectCols(cs opt.ColSet) bool { for i := range oc.Columns { if !oc.Group(i).Intersects(cs) { @@ -719,21 +717,21 @@ func (oc *OrderingChoice) CanSimplify(fdset *FuncDepSet) bool { // Simplify uses the given FD set to streamline the orderings allowed by this // instance. It can both increase and decrease the number of allowed orderings: // -// 1. Constant columns add additional optional column choices. +// 1. Constant columns add additional optional column choices. // -// 2. Equivalent columns allow additional choices within an ordering column -// group. +// 2. Equivalent columns allow additional choices within an ordering column +// group. // -// 3. Non-equivalent columns in an ordering column group are removed. +// 3. Non-equivalent columns in an ordering column group are removed. // -// 4. If the columns in a group are functionally determined by columns from -// previous groups, the group can be dropped. This technique is described -// in the "Reduce Order" section of this paper: +// 4. If the columns in a group are functionally determined by columns from +// previous groups, the group can be dropped. This technique is described +// in the "Reduce Order" section of this paper: // -// Simmen, David & Shekita, Eugene & Malkemus, Timothy. (1996). -// Fundamental Techniques for Order Optimization. -// Sigmod Record. Volume 25 Issue 2, June 1996. Pages 57-67. -// https://cs.uwaterloo.ca/~gweddell/cs798/p57-simmen.pdf +// Simmen, David & Shekita, Eugene & Malkemus, Timothy. (1996). +// Fundamental Techniques for Order Optimization. +// Sigmod Record. Volume 25 Issue 2, June 1996. Pages 57-67. +// https://cs.uwaterloo.ca/~gweddell/cs798/p57-simmen.pdf // // This logic should be changed in concert with the CanSimplify logic. func (oc *OrderingChoice) Simplify(fdset *FuncDepSet) { @@ -784,11 +782,10 @@ func (oc *OrderingChoice) Simplify(fdset *FuncDepSet) { // Truncate removes all ordering columns beyond the given index. 
For example, // +1,+(2|3),-4 opt(5,6) would be truncated to: // -// prefix=0 => opt(5,6) -// prefix=1 => +1 opt(5,6) -// prefix=2 => +1,+(2|3) opt(5,6) -// prefix=3+ => +1,+(2|3),-4 opt(5,6) -// +// prefix=0 => opt(5,6) +// prefix=1 => +1 opt(5,6) +// prefix=2 => +1,+(2|3) opt(5,6) +// prefix=3+ => +1,+(2|3),-4 opt(5,6) func (oc *OrderingChoice) Truncate(prefix int) { if prefix < len(oc.Columns) { oc.Columns = oc.Columns[:prefix] @@ -833,22 +830,21 @@ func (oc *OrderingChoice) RestrictToCols(cols opt.ColSet) { } // PrefixIntersection computes an OrderingChoice which: -// - implies (this instance), and -// - implies a "segmented ordering", which is any ordering which starts with a -// permutation of all columns in followed by the ordering. +// - implies (this instance), and +// - implies a "segmented ordering", which is any ordering which starts with a +// permutation of all columns in followed by the ordering. // // Note that and cannot have any columns in common. // // Such an ordering can be computed via the following rules: // -// - if and are empty: return this instance. +// - if and are empty: return this instance. // -// - if is empty: generate an arbitrary segmented ordering. -// -// - if the first column of is either in or is the first column -// of while is empty: this column is the first column of -// the result; calculate the rest recursively. +// - if is empty: generate an arbitrary segmented ordering. // +// - if the first column of is either in or is the first column +// of while is empty: this column is the first column of +// the result; calculate the rest recursively. func (oc OrderingChoice) PrefixIntersection( prefix opt.ColSet, suffix []OrderingColumnChoice, ) (_ OrderingChoice, ok bool) { @@ -939,12 +935,11 @@ func (oc OrderingChoice) String() string { // Format writes the OrderingChoice to the given buffer in a human-readable // string representation that can also be parsed by ParseOrderingChoice: // -// +1 -// +1,-2 -// +(1|2) -// +(1|2),+3 -// -(3|4),+5 opt(1,2) -// +// +1 +// +1,-2 +// +(1|2) +// +(1|2),+3 +// -(3|4),+5 opt(1,2) func (oc OrderingChoice) Format(buf *bytes.Buffer) { for g := range oc.Columns { group := &oc.Columns[g] diff --git a/pkg/sql/opt/props/physical/required.go b/pkg/sql/opt/props/physical/required.go index c468f144f7ce..6c44d40fff93 100644 --- a/pkg/sql/opt/props/physical/required.go +++ b/pkg/sql/opt/props/physical/required.go @@ -137,7 +137,8 @@ func (p *Required) LimitHintInt64() int64 { // While it cannot add unique columns, Presentation can rename, reorder, // duplicate and discard columns. If Presentation is not defined, then no // particular column presentation is required or provided. For example: -// a.y:2 a.x:1 a.y:2 column1:3 +// +// a.y:2 a.x:1 a.y:2 column1:3 type Presentation []opt.AliasedColumn // Any is true if any column presentation is allowed or can be provided. diff --git a/pkg/sql/opt/props/statistics.go b/pkg/sql/opt/props/statistics.go index a8d09b84e3bd..b48f34b5e7ef 100644 --- a/pkg/sql/opt/props/statistics.go +++ b/pkg/sql/opt/props/statistics.go @@ -33,7 +33,7 @@ import ( // lazily, and only as needed to determine the row count for the current // expression or a parent expression. 
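Relating the Simplify rules above back to the FD set, a hedged sketch: a constant column becomes optional (rule 1) and its ordering group can be dropped (rule 4). AddConstants and Simplify appear elsewhere in this diff; the commented String output is a guess at the format shown in the Format examples above.

package props_test

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/opt"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
)

func ExampleOrderingChoice_Simplify() {
	// Column 2 is held constant by the FD set, e.g. because of a filter like
	// "b = 1" on the column it represents.
	var fd props.FuncDepSet
	fd.AddConstants(opt.MakeColSet(2))

	oc := props.ParseOrderingChoice("+1,+2")
	oc.Simplify(&fd)

	// Per rule 1 above, column 2 becomes optional; per rule 4, its group is
	// dropped because it is functionally determined by the preceding columns.
	fmt.Println(oc.String()) // expected to print something like: +1 opt(2)
}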
For example: // -// SELECT y FROM a WHERE x=1 +// SELECT y FROM a WHERE x=1 // // The only column that affects the row count of this query is x, since the // distribution of values in x is what determines the selectivity of the diff --git a/pkg/sql/opt/props/verify.go b/pkg/sql/opt/props/verify.go index 35e440d15605..20f068b041ce 100644 --- a/pkg/sql/opt/props/verify.go +++ b/pkg/sql/opt/props/verify.go @@ -19,10 +19,9 @@ import ( // Verify runs consistency checks against the shared properties, in order to // ensure that they conform to several invariants: // -// 1. The properties must have been built. -// 2. If HasCorrelatedSubquery is true, then HasSubquery must be true as well. -// 3. If Mutate is true, then VolatilitySet must contain Volatile. -// +// 1. The properties must have been built. +// 2. If HasCorrelatedSubquery is true, then HasSubquery must be true as well. +// 3. If Mutate is true, then VolatilitySet must contain Volatile. func (s *Shared) Verify() { if !buildutil.CrdbTestBuild { return @@ -41,12 +40,11 @@ func (s *Shared) Verify() { // Verify runs consistency checks against the relational properties, in order to // ensure that they conform to several invariants: // -// 1. Functional dependencies are internally consistent. -// 2. Not null columns are a subset of output columns. -// 3. Outer columns do not intersect output columns. -// 4. If functional dependencies indicate that the relation can have at most -// one row, then the cardinality reflects that as well. -// +// 1. Functional dependencies are internally consistent. +// 2. Not null columns are a subset of output columns. +// 3. Outer columns do not intersect output columns. +// 4. If functional dependencies indicate that the relation can have at most +// one row, then the cardinality reflects that as well. func (r *Relational) Verify() { if !buildutil.CrdbTestBuild { return @@ -102,8 +100,7 @@ func (r *Relational) VerifyAgainst(other *Relational) { // Verify runs consistency checks against the relational properties, in order to // ensure that they conform to several invariants: // -// 1. Functional dependencies are internally consistent. -// +// 1. Functional dependencies are internally consistent. func (s *Scalar) Verify() { if !buildutil.CrdbTestBuild { return diff --git a/pkg/sql/opt/props/volatility.go b/pkg/sql/opt/props/volatility.go index 8d24c50fcc6a..9b0011fdc65f 100644 --- a/pkg/sql/opt/props/volatility.go +++ b/pkg/sql/opt/props/volatility.go @@ -19,70 +19,71 @@ import "github.com/cockroachdb/cockroach/pkg/sql/sem/volatility" // for plan caching purposes, we want to distinguish the case when a stable // operator is used - regardless of whether a volatile operator is used. For // example, consider these two statements: -// (1) INSERT INTO t VALUES (gen_random_uuid(), '2020-10-09') -// (2) INSERT INTO t VALUES (gen_random_uuid(), now()) +// +// (1) INSERT INTO t VALUES (gen_random_uuid(), '2020-10-09') +// (2) INSERT INTO t VALUES (gen_random_uuid(), now()) +// // For (1) we can cache the final optimized plan. For (2), we can only cache the // memo if we don't constant fold stable operators, and subsequently fold them // each time we try to execute an instance of the query. // // The optimizer makes *only* the following side-effect related guarantees: // -// 1. CASE/IF branches are only evaluated if the branch condition is true or -// if all operators are Leakproof. 
Therefore, the following is guaranteed -// to never raise a divide by zero error, regardless of how cleverly the -// optimizer rewrites the expression: +// 1. CASE/IF branches are only evaluated if the branch condition is true or +// if all operators are Leakproof. Therefore, the following is guaranteed +// to never raise a divide by zero error, regardless of how cleverly the +// optimizer rewrites the expression: // -// CASE WHEN divisor<>0 THEN dividend / divisor ELSE NULL END +// CASE WHEN divisor<>0 THEN dividend / divisor ELSE NULL END // -// While this example is trivial, a more complex example might have -// correlated subqueries that cannot be hoisted outside the CASE -// expression in the usual way, since that would trigger premature -// evaluation. +// While this example is trivial, a more complex example might have +// correlated subqueries that cannot be hoisted outside the CASE +// expression in the usual way, since that would trigger premature +// evaluation. // -// However, there is a notable exception to this guarantee. When a branch -// is an uncorrelated subquery, it will be evaluated if a previous -// conditional does not evaluate to true at optimization-time. This is due -// to the fact that subqueries are eagerly evaluated when query execution -// begins. See #20298. +// However, there is a notable exception to this guarantee. When a branch +// is an uncorrelated subquery, it will be evaluated if a previous +// conditional does not evaluate to true at optimization-time. This is due +// to the fact that subqueries are eagerly evaluated when query execution +// begins. See #20298. // -// 2. Volatile expressions are never treated as constant expressions, even -// though they do not depend on other columns in the query: +// 2. Volatile expressions are never treated as constant expressions, even +// though they do not depend on other columns in the query: // -// SELECT * FROM xy ORDER BY random() +// SELECT * FROM xy ORDER BY random() // -// If the random() expression were treated as a constant, then the ORDER -// BY could be dropped by the optimizer, since ordering by a constant is -// a no-op. Instead, the optimizer treats it like it would an expression -// that depends upon a column. +// If the random() expression were treated as a constant, then the ORDER +// BY could be dropped by the optimizer, since ordering by a constant is +// a no-op. Instead, the optimizer treats it like it would an expression +// that depends upon a column. // -// 3. A common table expression (CTE) containing Volatile operators will only -// be evaluated one time. This will typically prevent inlining of the CTE -// into the query body. For example: +// 3. A common table expression (CTE) containing Volatile operators will only +// be evaluated one time. This will typically prevent inlining of the CTE +// into the query body. For example: // -// WITH a AS (INSERT ... RETURNING ...) SELECT * FROM a, a +// WITH a AS (INSERT ... RETURNING ...) SELECT * FROM a, a // -// Although the "a" CTE is referenced twice, it must be evaluated only -// one time (and its results cached to satisfy the second reference). +// Although the "a" CTE is referenced twice, it must be evaluated only +// one time (and its results cached to satisfy the second reference). // // As long as the optimizer provides these guarantees, it is free to rewrite, // reorder, duplicate, and eliminate as if no side effects were present. 
As an // example, the optimizer is free to eliminate the unused "nextval" column in // this query: // -// SELECT x FROM (SELECT nextval(seq), x FROM xy) -// => -// SELECT x FROM xy +// SELECT x FROM (SELECT nextval(seq), x FROM xy) +// => +// SELECT x FROM xy // // It's also allowed to duplicate side-effecting expressions during predicate // pushdown: // -// SELECT * FROM xy INNER JOIN xz ON xy.x=xz.x WHERE xy.x=random() -// => -// SELECT * -// FROM (SELECT * FROM xy WHERE xy.x=random()) -// INNER JOIN (SELECT * FROM xz WHERE xz.x=random()) -// ON xy.x=xz.x -// +// SELECT * FROM xy INNER JOIN xz ON xy.x=xz.x WHERE xy.x=random() +// => +// SELECT * +// FROM (SELECT * FROM xy WHERE xy.x=random()) +// INNER JOIN (SELECT * FROM xz WHERE xz.x=random()) +// ON xy.x=xz.x type VolatilitySet uint8 // Add a volatility to the set. diff --git a/pkg/sql/opt/table_meta.go b/pkg/sql/opt/table_meta.go index a7e2c8b625d9..ddee17f5de87 100644 --- a/pkg/sql/opt/table_meta.go +++ b/pkg/sql/opt/table_meta.go @@ -40,7 +40,8 @@ const ( // in the table. // // NOTE: This method cannot do bounds checking, so it's up to the caller to -// ensure that a column really does exist at this ordinal position. +// +// ensure that a column really does exist at this ordinal position. func (t TableID) ColumnID(ord int) ColumnID { return t.firstColID() + ColumnID(ord) } @@ -55,7 +56,8 @@ func (t TableID) IndexColumnID(idx cat.Index, idxOrd int) ColumnID { // table. // // NOTE: This method cannot do complete bounds checking, so it's up to the -// caller to ensure that this column is really in the given base table. +// +// caller to ensure that this column is really in the given base table. func (t TableID) ColumnOrdinal(id ColumnID) int { if buildutil.CrdbTestBuild && id < t.firstColID() { panic(errors.AssertionFailedf("ordinal cannot be negative")) @@ -94,10 +96,10 @@ func (t TableID) index() int { // phase. The returned TableAnnID never clashes with other annotations on the // same table. Here is a usage example: // -// var myAnnID = NewTableAnnID() +// var myAnnID = NewTableAnnID() // -// md.SetTableAnnotation(TableID(1), myAnnID, "foo") -// ann := md.TableAnnotation(TableID(1), myAnnID) +// md.SetTableAnnotation(TableID(1), myAnnID, "foo") +// ann := md.TableAnnotation(TableID(1), myAnnID) // // Currently, the following annotations are in use: // - FuncDeps: functional dependencies derived from the base table diff --git a/pkg/sql/opt/testutils/opttester/opt_steps.go b/pkg/sql/opt/testutils/opttester/opt_steps.go index 414f91d04264..3ba60e474c81 100644 --- a/pkg/sql/opt/testutils/opttester/opt_steps.go +++ b/pkg/sql/opt/testutils/opttester/opt_steps.go @@ -19,37 +19,37 @@ import ( // command. See the OptTester.OptSteps comment for more details on the command. // // The algorithm works as follows: -// 1. The first time optSteps.next() is called, optSteps returns the starting -// expression tree, with no transformations applied to it. // -// 2. Each optSteps.next() call after that will perform N+1 transformations, -// where N is the number of steps performed during the previous call -// (starting at 0 with the first call). +// 1. The first time optSteps.next() is called, optSteps returns the starting +// expression tree, with no transformations applied to it. // -// 3. Each optSteps.next() call will build the expression tree from scratch -// and re-run all transformations that were run in the previous call, plus -// one additional transformation (N+1). 
Therefore, the output expression -// tree from each call will differ from the previous call only by the last -// transformation's changes. +// 2. Each optSteps.next() call after that will perform N+1 transformations, +// where N is the number of steps performed during the previous call +// (starting at 0 with the first call). // -// 4. optSteps hooks the optimizer's MatchedRule event in order to limit the -// number of transformations that can be applied, as well as to record the -// name of the last rule that was applied, for later output. +// 3. Each optSteps.next() call will build the expression tree from scratch +// and re-run all transformations that were run in the previous call, plus +// one additional transformation (N+1). Therefore, the output expression +// tree from each call will differ from the previous call only by the last +// transformation's changes. // -// 5. While this works well for normalization rules, exploration rules are -// more difficult. This is because exploration rules are not guaranteed to -// produce a lower cost tree. Unless extra measures are taken, the returned -// Expr would not include the changed portion of the Memo, since Expr only -// shows the lowest cost path through the Memo. +// 4. optSteps hooks the optimizer's MatchedRule event in order to limit the +// number of transformations that can be applied, as well as to record the +// name of the last rule that was applied, for later output. // -// 6. To address this issue, optSteps hooks the optimizer's AppliedRule event -// and records the expression(s) that the last transformation has affected. -// It then re-runs the optimizer, but this time using a special Coster -// implementation that fools the optimizer into thinking that the new -// expression(s) have the lowest cost. The coster does this by assigning an -// infinite cost to all other expressions in the same group as the new -// expression(s), as well as in all ancestor groups. +// 5. While this works well for normalization rules, exploration rules are +// more difficult. This is because exploration rules are not guaranteed to +// produce a lower cost tree. Unless extra measures are taken, the returned +// Expr would not include the changed portion of the Memo, since Expr only +// shows the lowest cost path through the Memo. // +// 6. To address this issue, optSteps hooks the optimizer's AppliedRule event +// and records the expression(s) that the last transformation has affected. +// It then re-runs the optimizer, but this time using a special Coster +// implementation that fools the optimizer into thinking that the new +// expression(s) have the lowest cost. The coster does this by assigning an +// infinite cost to all other expressions in the same group as the new +// expression(s), as well as in all ancestor groups. type optSteps struct { tester *OptTester diff --git a/pkg/sql/opt/testutils/opttester/opt_tester.go b/pkg/sql/opt/testutils/opttester/opt_tester.go index d1b746577262..cd472f16fe25 100644 --- a/pkg/sql/opt/testutils/opttester/opt_tester.go +++ b/pkg/sql/opt/testutils/opttester/opt_tester.go @@ -291,241 +291,254 @@ func New(catalog cat.Catalog, sql string) *OptTester { // RunCommand implements commands that are used by most tests: // -// - exec-ddl +// - exec-ddl // -// Runs a SQL DDL statement to build the test catalog. Only a small number -// of DDL statements are supported, and those not fully. This is only -// available when using a TestCatalog. +// Runs a SQL DDL statement to build the test catalog. 
Only a small number +// of DDL statements are supported, and those not fully. This is only +// available when using a TestCatalog. // -// - build [flags] +// - build [flags] // -// Builds an expression tree from a SQL query and outputs it without any -// optimizations applied to it. +// Builds an expression tree from a SQL query and outputs it without any +// optimizations applied to it. // -// - norm [flags] +// - norm [flags] // -// Builds an expression tree from a SQL query, applies normalization -// optimizations, and outputs it without any exploration optimizations -// applied to it. +// Builds an expression tree from a SQL query, applies normalization +// optimizations, and outputs it without any exploration optimizations +// applied to it. // -// - opt [flags] +// - opt [flags] // -// Builds an expression tree from a SQL query, fully optimizes it using the -// memo, and then outputs the lowest cost tree. +// Builds an expression tree from a SQL query, fully optimizes it using the +// memo, and then outputs the lowest cost tree. // -// - assign-placeholders-build query-args=(...) +// - assign-placeholders-build query-args=(...) // -// Builds a query that has placeholders (with normalization disabled), then -// assigns placeholders to the given query arguments. Normalization rules are -// disabled when assigning placeholders. +// Builds a query that has placeholders (with normalization disabled), then +// assigns placeholders to the given query arguments. Normalization rules are +// disabled when assigning placeholders. // -// - assign-placeholders-norm query-args=(...) +// - assign-placeholders-norm query-args=(...) // -// Builds a query that has placeholders (with normalization enabled), then -// assigns placeholders to the given query arguments. Normalization rules are -// enabled when assigning placeholders. +// Builds a query that has placeholders (with normalization enabled), then +// assigns placeholders to the given query arguments. Normalization rules are +// enabled when assigning placeholders. // -// - assign-placeholders-opt query-args=(...) +// - assign-placeholders-opt query-args=(...) // -// Builds a query that has placeholders (with normalization enabled), then -// assigns placeholders to the given query arguments and fully optimizes it. +// Builds a query that has placeholders (with normalization enabled), then +// assigns placeholders to the given query arguments and fully optimizes it. // -// - placeholder-fast-path [flags] +// - placeholder-fast-path [flags] // -// Builds an expression tree from a SQL query which contains placeholders and -// attempts to use the placeholder fast path to obtain a fully optimized -// expression with placeholders. +// Builds an expression tree from a SQL query which contains placeholders and +// attempts to use the placeholder fast path to obtain a fully optimized +// expression with placeholders. // -// - build-cascades [flags] +// - build-cascades [flags] // -// Builds a query and then recursively builds cascading queries. Outputs all -// unoptimized plans. +// Builds a query and then recursively builds cascading queries. Outputs all +// unoptimized plans. // -// - optsteps [flags] +// - optsteps [flags] // -// Outputs the lowest cost tree for each step in optimization using the -// standard unified diff format. Used for debugging the optimizer. +// Outputs the lowest cost tree for each step in optimization using the +// standard unified diff format. Used for debugging the optimizer. 
// -// - optstepsweb [flags] +// - optstepsweb [flags] // -// Similar to optsteps, but outputs a URL which displays the results. +// Similar to optsteps, but outputs a URL which displays the results. // -// - exploretrace [flags] +// - exploretrace [flags] // -// Outputs information about exploration rule application. Used for debugging -// the optimizer. +// Outputs information about exploration rule application. Used for debugging +// the optimizer. // -// - memo [flags] +// - memo [flags] // -// Builds an expression tree from a SQL query, fully optimizes it using the -// memo, and then outputs the memo containing the forest of trees. +// Builds an expression tree from a SQL query, fully optimizes it using the +// memo, and then outputs the memo containing the forest of trees. // -// - rulestats [flags] +// - rulestats [flags] // -// Performs the optimization and outputs statistics about applied rules. +// Performs the optimization and outputs statistics about applied rules. // -// - expr +// - expr // -// Builds an expression directly from an opt-gen-like string; see -// exprgen.Build. +// Builds an expression directly from an opt-gen-like string; see +// exprgen.Build. // -// - exprnorm +// - exprnorm // -// Builds an expression directly from an opt-gen-like string (see -// exprgen.Build), applies normalization optimizations, and outputs the tree -// without any exploration optimizations applied to it. +// Builds an expression directly from an opt-gen-like string (see +// exprgen.Build), applies normalization optimizations, and outputs the tree +// without any exploration optimizations applied to it. // -// - expropt +// - expropt // -// Builds an expression directly from an opt-gen-like string (see -// exprgen.Optimize), applies normalization and exploration optimizations, -// and outputs the tree. +// Builds an expression directly from an opt-gen-like string (see +// exprgen.Optimize), applies normalization and exploration optimizations, +// and outputs the tree. // -// - stats-quality [flags] +// - stats-quality [flags] // -// Fully optimizes the given query and saves the subexpressions as tables -// in the test catalog with their estimated statistics injected. -// If rewriteActualFlag=true, also executes the given query against a -// running database and saves the intermediate results as tables. -// Compares estimated statistics for a relational expression with the actual -// statistics calculated by calling CREATE STATISTICS on the output of the -// expression. If rewriteActualFlag=false, stats-quality must have been run -// previously with rewriteActualFlag=true to save the statistics as tables. +// Fully optimizes the given query and saves the subexpressions as tables +// in the test catalog with their estimated statistics injected. +// If rewriteActualFlag=true, also executes the given query against a +// running database and saves the intermediate results as tables. +// Compares estimated statistics for a relational expression with the actual +// statistics calculated by calling CREATE STATISTICS on the output of the +// expression. If rewriteActualFlag=false, stats-quality must have been run +// previously with rewriteActualFlag=true to save the statistics as tables. // -// - reorderjoins [flags] +// - reorderjoins [flags] // -// Fully optimizes the given query and outputs information from -// joinOrderBuilder during join reordering. See the ReorderJoins comment in -// reorder_joins.go for information on the output format. 
+// Fully optimizes the given query and outputs information from +// joinOrderBuilder during join reordering. See the ReorderJoins comment in +// reorder_joins.go for information on the output format. // -// - import file=... +// - import file=... // -// Imports a file containing exec-ddl commands in order to add tables and/or -// stats to the catalog. This allows commonly-used schemas such as TPC-C or -// TPC-H to be used by multiple test files without copying the schemas and -// stats multiple times. The file name must be provided with the file flag. -// The path of the file should be relative to -// testutils/opttester/testfixtures. +// Imports a file containing exec-ddl commands in order to add tables and/or +// stats to the catalog. This allows commonly-used schemas such as TPC-C or +// TPC-H to be used by multiple test files without copying the schemas and +// stats multiple times. The file name must be provided with the file flag. +// The path of the file should be relative to +// testutils/opttester/testfixtures. // -// - inject-stats file=... table=... +// - inject-stats file=... table=... // -// Injects table statistics from a json file. +// Injects table statistics from a json file. // -// - check-size [rule-limit=...] [group-limit=...] [suppress-report] +// - check-size [rule-limit=...] [group-limit=...] [suppress-report] // -// Fully optimizes the given query and outputs the number of rules applied -// and memo groups created. If the rule-limit or group-limit flags are set, -// check-size will result in a test error if the rule application or memo -// group count exceeds the corresponding limit. If either the rule-limit or -// group-limit options are used the suppress-report option suppresses -// printing of the number of rules and groups explored. +// Fully optimizes the given query and outputs the number of rules applied +// and memo groups created. If the rule-limit or group-limit flags are set, +// check-size will result in a test error if the rule application or memo +// group count exceeds the corresponding limit. If either the rule-limit or +// group-limit options are used the suppress-report option suppresses +// printing of the number of rules and groups explored. // -// - index-candidates +// - index-candidates // -// Walks through the SQL statement to determine candidates for index -// recommendation. See the indexrec package. +// Walks through the SQL statement to determine candidates for index +// recommendation. See the indexrec package. // -// - index-recommendations +// - index-recommendations // -// Walks through the SQL statement and recommends indexes to add in order to -// speed up its execution, if these indexes exist. See the indexrec package. +// Walks through the SQL statement and recommends indexes to add in order to +// speed up its execution, if these indexes exist. See the indexrec package. // // Supported flags: // -// - format: controls the formatting of expressions for build, opt, and -// optsteps commands. Format flags are of the form -// (show|hide)-(all|miscprops|constraints|scalars|types|...) -// See formatFlags for all flags. Multiple flags can be specified; each flag -// modifies the existing set of the flags. +// - format: controls the formatting of expressions for build, opt, and +// optsteps commands. Format flags are of the form +// (show|hide)-(all|miscprops|constraints|scalars|types|...) +// See formatFlags for all flags. Multiple flags can be specified; each flag +// modifies the existing set of the flags. 
// -// - no-stable-folds: disallows constant folding for stable operators; only -// used with "norm". +// - no-stable-folds: disallows constant folding for stable operators; only +// used with "norm". // -// - fully-qualify-names: fully qualify all column names in the test output. +// - fully-qualify-names: fully qualify all column names in the test output. // -// - expect: fail the test if the rules specified by name are not "applied". -// For normalization rules, "applied" means that the rule's pattern matched -// an expression. For exploration rules, "applied" means that the rule's -// pattern matched an expression and the rule generated one or more new -// expressions in the memo. +// - expect: fail the test if the rules specified by name are not "applied". +// For normalization rules, "applied" means that the rule's pattern matched +// an expression. For exploration rules, "applied" means that the rule's +// pattern matched an expression and the rule generated one or more new +// expressions in the memo. // -// - expect-not: fail the test if the rules specified by name are "applied". +// - expect-not: fail the test if the rules specified by name are "applied". // -// - disable: disables optimizer rules by name. Examples: -// opt disable=ConstrainScan -// norm disable=(NegateOr,NegateAnd) +// - disable: disables optimizer rules by name. Examples: +// opt disable=ConstrainScan +// norm disable=(NegateOr,NegateAnd) // -// - rule: used with exploretrace; the value is the name of a rule. When -// specified, the exploretrace output is filtered to only show expression -// changes due to that specific rule. +// - rule: used with exploretrace; the value is the name of a rule. When +// specified, the exploretrace output is filtered to only show expression +// changes due to that specific rule. // -// - skip-no-op: used with exploretrace; hide instances of rules that don't -// generate any new expressions. +// - skip-no-op: used with exploretrace; hide instances of rules that don't +// generate any new expressions. // -// - colstat: requests the calculation of a column statistic on the top-level -// expression. The value is a column or a list of columns. The flag can -// be used multiple times to request different statistics. +// - colstat: requests the calculation of a column statistic on the top-level +// expression. The value is a column or a list of columns. The flag can +// be used multiple times to request different statistics. // -// - perturb-cost: used to randomly perturb the estimated cost of each -// expression in the query tree for the purpose of creating alternate query -// plans in the optimizer. +// - perturb-cost: used to randomly perturb the estimated cost of each +// expression in the query tree for the purpose of creating alternate query +// plans in the optimizer. // -// - locality: used to set the locality of the node that plans the query. This -// can affect costing when there are multiple possible indexes to choose -// from, each in different localities. +// - locality: used to set the locality of the node that plans the query. This +// can affect costing when there are multiple possible indexes to choose +// from, each in different localities. // -// - database: used to set the current database used by the query. This is -// used by the stats-quality command when rewriteActualFlag=true. +// - database: used to set the current database used by the query. This is +// used by the stats-quality command when rewriteActualFlag=true. 
// -// - table: used to set the current table used by the command. This is used by -// the inject-stats command. +// - table: used to set the current table used by the command. This is used by +// the inject-stats command. // -// - stats-quality-prefix: must be used with the stats-quality command. If -// rewriteActualFlag=true, indicates that a table should be created with the -// given prefix for the output of each subexpression in the query. Otherwise, -// outputs the name of the table that would be created for each -// subexpression. +// - stats-quality-prefix: must be used with the stats-quality command. If +// rewriteActualFlag=true, indicates that a table should be created with the +// given prefix for the output of each subexpression in the query. Otherwise, +// outputs the name of the table that would be created for each +// subexpression. // -// - ignore-tables: specifies the set of stats tables for which stats quality -// comparisons should not be outputted. Only used with the stats-quality -// command. Note that tables can always be added to the `ignore-tables` set -// without necessitating a run with `rewrite-actual-stats=true`, because the -// now-ignored stats outputs will simply be removed. However, the reverse is -// not possible. So, the best way to rewrite a stats quality test for which -// the plan has changed is to first remove the `ignore-tables` flag, then add -// it back and do a normal rewrite to remove the superfluous tables. +// - ignore-tables: specifies the set of stats tables for which stats quality +// comparisons should not be outputted. Only used with the stats-quality +// command. Note that tables can always be added to the `ignore-tables` set +// without necessitating a run with `rewrite-actual-stats=true`, because the +// now-ignored stats outputs will simply be removed. However, the reverse is +// not possible. So, the best way to rewrite a stats quality test for which +// the plan has changed is to first remove the `ignore-tables` flag, then add +// it back and do a normal rewrite to remove the superfluous tables. // -// - file: specifies a file, used for the following commands: -// - import: the file path is relative to opttester/testfixtures; -// - inject-stats: the file path is relative to the test file. +// - file: specifies a file, used for the following commands: // -// - cascade-levels: used to limit the depth of recursive cascades for -// build-cascades. +// - import: the file path is relative to opttester/testfixtures; // -// - index-version: controls the version of the index descriptor created in -// the test catalog. This is used by the exec-ddl command for CREATE INDEX -// statements. +// - inject-stats: the file path is relative to the test file. // -// - split-diff: replaces the unified diff output of the optsteps command with -// a split diff where the before and after expressions are printed in their -// entirety. This is only used by the optsteps command. +// - join-limit: sets the value for SessionData.ReorderJoinsLimit, which +// indicates the number of joins at which the optimizer should stop +// attempting to reorder. // -// - rule-limit: used with check-size to set a max limit on the number of rules -// that can be applied before a testing error is returned. +// - prefer-lookup-joins-for-fks: sets SessionData.PreferLookupJoinsForFKs to +// true, causing foreign key operations to prefer lookup joins. 
// -// - group-limit: used with check-size to set a max limit on the number of -// groups that can be added to the memo before a testing error is returned. +// - null-ordered-last: sets SessionData.NullOrderedLast to true, which orders +// NULL values last in ascending order. // -// - memo-cycles: used with memo to search the memo for cycles and output a -// path with a cycle if one is found. +// - cascade-levels: used to limit the depth of recursive cascades for +// build-cascades. // -// - skip-race: skips the test if the race detector is enabled. +// - index-version: controls the version of the index descriptor created in +// the test catalog. This is used by the exec-ddl command for CREATE INDEX +// statements. // -// - set: sets the session setting for the given SQL statement, for example: -// build set=prefer_lookup_joins_for_fks=true -// DELETE FROM parent WHERE p = 3 -// ---- +// - split-diff: replaces the unified diff output of the optsteps command with +// a split diff where the before and after expressions are printed in their +// entirety. This is only used by the optsteps command. // +// - rule-limit: used with check-size to set a max limit on the number of rules +// that can be applied before a testing error is returned. +// +// - skip-race: skips the test if the race detector is enabled. +// +// - group-limit: used with check-size to set a max limit on the number of +// groups that can be added to the memo before a testing error is returned. +// +// - memo-cycles: used with memo to search the memo for cycles and output a +// path with a cycle if one is found. +// +// - skip-race: skips the test if the race detector is enabled. +// +// - set: sets the session setting for the given SQL statement, for example: +// build set=prefer_lookup_joins_for_fks=true +// DELETE FROM parent WHERE p = 3 +// ---- func (ot *OptTester) RunCommand(tb testing.TB, d *datadriven.TestData) string { // Allow testcases to override the flags. for _, a := range d.CmdArgs { @@ -1397,17 +1410,18 @@ func (ot *OptTester) RuleStats() (string, error) { // transformation. The output of each step is diff'd against the output of a // previous step, using the standard unified diff format. // -// CREATE TABLE a (x INT PRIMARY KEY, y INT, UNIQUE INDEX (y)) +// CREATE TABLE a (x INT PRIMARY KEY, y INT, UNIQUE INDEX (y)) // -// SELECT x FROM a WHERE x=1 +// SELECT x FROM a WHERE x=1 // // At the time of this writing, this query triggers 6 rule applications: -// EnsureSelectFilters Wrap Select predicate with Filters operator -// FilterUnusedSelectCols Do not return unused "y" column from Scan -// EliminateProject Remove unneeded Project operator -// GenerateIndexScans Explore scanning "y" index to get "x" values -// ConstrainScan Explore pushing "x=1" into "x" index Scan -// ConstrainScan Explore pushing "x=1" into "y" index Scan +// +// EnsureSelectFilters Wrap Select predicate with Filters operator +// FilterUnusedSelectCols Do not return unused "y" column from Scan +// EliminateProject Remove unneeded Project operator +// GenerateIndexScans Explore scanning "y" index to get "x" values +// ConstrainScan Explore pushing "x=1" into "x" index Scan +// ConstrainScan Explore pushing "x=1" into "y" index Scan // // Some steps produce better plans that have a lower execution cost. Other steps // don't. However, it's useful to see both kinds of steps. The optsteps output @@ -1415,7 +1429,6 @@ func (ot *OptTester) RuleStats() (string, error) { // a better plan has been found, and weaker "----" header delimiters when not. 
// In both cases, the output shows the expressions that were changed or added by // the rule, even if the total expression tree cost worsened. -// func (ot *OptTester) OptSteps() (string, error) { var prevBest, prev, next string ot.builder.Reset() diff --git a/pkg/sql/opt/testutils/opttester/reorder_joins.go b/pkg/sql/opt/testutils/opttester/reorder_joins.go index 8ea9437216f4..ba712309d4db 100644 --- a/pkg/sql/opt/testutils/opttester/reorder_joins.go +++ b/pkg/sql/opt/testutils/opttester/reorder_joins.go @@ -24,15 +24,16 @@ import ( // ReorderJoins optimizes the given query and outputs intermediate steps taken // during join enumeration. For each call to joinOrderBuilder.Reorder, the // output is as follows: -// 1. The original join tree that is used to form the join graph. -// 2. The vertexes of the join graph (as well as compact aliases that will be -// used to output joins added to the memo). -// 3. The edges of the join graph. -// 4. The joins which joinOrderBuilder attempts to add to the memo. An output -// like 'AB CD' means a join tree containing relations A and B is being -// joined to a join tree containing relations C and D. There is also a -// 'refs' field containing all relations that are referenced by the join's -// ON condition. +// 1. The original join tree that is used to form the join graph. +// 2. The vertexes of the join graph (as well as compact aliases that will be +// used to output joins added to the memo). +// 3. The edges of the join graph. +// 4. The joins which joinOrderBuilder attempts to add to the memo. An output +// like 'AB CD' means a join tree containing relations A and B is being +// joined to a join tree containing relations C and D. There is also a +// 'refs' field containing all relations that are referenced by the join's +// ON condition. +// // The final optimized plan is then output. func (ot *OptTester) ReorderJoins() (string, error) { ot.builder.Reset() diff --git a/pkg/sql/opt/testutils/opttester/stats_tester.go b/pkg/sql/opt/testutils/opttester/stats_tester.go index b347dde28ff9..70570dcbcd6e 100644 --- a/pkg/sql/opt/testutils/opttester/stats_tester.go +++ b/pkg/sql/opt/testutils/opttester/stats_tester.go @@ -60,7 +60,6 @@ type statsTester struct { // table to recalculate the actual statistics. Otherwise, it will reuse the // actual stats in the test output (calculated previously) to compare against // the estimated stats. -// func (st statsTester) testStats( catalog *testcat.Catalog, prevOutputs []string, tableName, headingSep string, ) (_ string, err error) { @@ -166,9 +165,9 @@ func (st statsTester) testStats( // getActualStats gets the actual statistics from the test output or // recalculates them if rewriteActualStats is true. // Returns: -// 1. The actual statistics as a slice of strings (one for each row) -// 2. A map from column names to statistic for comparison with the estimated -// stats. +// 1. The actual statistics as a slice of strings (one for each row) +// 2. A map from column names to statistic for comparison with the estimated +// stats. func (st statsTester) getActualStats( prevOutput string, tableName string, sep string, ) ([]string, map[string]statistic, error) { @@ -315,7 +314,7 @@ func (st statsTester) getActualStatsMap(actualStats []string) (map[string]statis // qErr calculates the q-error for the given estimated and actual values. 
// q-error is symmetric and multiplicative, and satisfies the formula: // -// (1/q) * actual <= estimated <= q * actual +// (1/q) * actual <= estimated <= q * actual // // A q-error of 1 is a perfect estimate, and a q-error <= 1.9 is considered // acceptable. diff --git a/pkg/sql/opt/testutils/scalar_vars.go b/pkg/sql/opt/testutils/scalar_vars.go index 417af8b67555..1a9a953232ac 100644 --- a/pkg/sql/opt/testutils/scalar_vars.go +++ b/pkg/sql/opt/testutils/scalar_vars.go @@ -32,10 +32,10 @@ type ScalarVars struct { // and initializes the ScalarVars. // // Each definition string is of the form: -// " type1 [not null] // -// The not-null columns can be retrieved via NotNullCols(). +// " type1 [not null] // +// The not-null columns can be retrieved via NotNullCols(). func (sv *ScalarVars) Init(md *opt.Metadata, vars []string) error { // This initialization pattern ensures that fields are not unwittingly // reused. Field reuse must be explicit. diff --git a/pkg/sql/opt/testutils/testcat/alter_table.go b/pkg/sql/opt/testutils/testcat/alter_table.go index a97a9c7f7a7c..f90f5e3aacf4 100644 --- a/pkg/sql/opt/testutils/testcat/alter_table.go +++ b/pkg/sql/opt/testutils/testcat/alter_table.go @@ -26,9 +26,8 @@ import ( // AlterTable is a partial implementation of the ALTER TABLE statement. // // Supported commands: -// - INJECT STATISTICS: imports table statistics from a JSON object. -// - ADD CONSTRAINT FOREIGN KEY: add a foreign key reference. -// +// - INJECT STATISTICS: imports table statistics from a JSON object. +// - ADD CONSTRAINT FOREIGN KEY: add a foreign key reference. func (tc *Catalog) AlterTable(stmt *tree.AlterTable) { tn := stmt.Table.ToTableName() // Update the table name to include catalog and schema if not provided. diff --git a/pkg/sql/opt/testutils/testexpr/test_expr.go b/pkg/sql/opt/testutils/testexpr/test_expr.go index c3d9a6d0ba31..f04d22a1b1ed 100644 --- a/pkg/sql/opt/testutils/testexpr/test_expr.go +++ b/pkg/sql/opt/testutils/testexpr/test_expr.go @@ -21,11 +21,10 @@ import ( // extracted via that interface. It can be initialized with whatever subset of // fields are required for the particular test; for example: // -// e := &testexpr.Instance{ -// Rel: &props.Relational{...}, -// Provided: &physical.Provided{...}, -// } -// +// e := &testexpr.Instance{ +// Rel: &props.Relational{...}, +// Provided: &physical.Provided{...}, +// } type Instance struct { Rel *props.Relational Required *physical.Required diff --git a/pkg/sql/opt/xform/coster.go b/pkg/sql/opt/xform/coster.go index b44c0b792ec9..6ef608f5e730 100644 --- a/pkg/sql/opt/xform/coster.go +++ b/pkg/sql/opt/xform/coster.go @@ -38,11 +38,11 @@ import ( // operators constitute the "cost model". A given cost model can be designed to // maximize any optimization goal, such as: // -// 1. Max aggregate cluster throughput (txns/sec across cluster) -// 2. Min transaction latency (time to commit txns) -// 3. Min latency to first row (time to get first row of txns) -// 4. Min memory usage -// 5. Some weighted combination of #1 - #4 +// 1. Max aggregate cluster throughput (txns/sec across cluster) +// 2. Min transaction latency (time to commit txns) +// 3. Min latency to first row (time to get first row of txns) +// 4. Min memory usage +// 5. Some weighted combination of #1 - #4 // // The cost model in this file targets #1 as the optimization goal. However, // note that #2 is implicitly important to that goal, since overall cluster @@ -192,7 +192,9 @@ const ( // yet. 
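The q-error inequality above pins q down as the larger of the two ratios between estimated and actual. A minimal sketch of that computation (not the package's qErr helper, and ignoring zero values):

package main

import "fmt"

// qError returns the smallest q satisfying
// (1/q) * actual <= estimated <= q * actual.
func qError(estimated, actual float64) float64 {
	if estimated > actual {
		return estimated / actual
	}
	return actual / estimated
}

func main() {
	fmt.Println(qError(150, 100)) // 1.5: acceptable (<= 1.9)
	fmt.Println(qError(100, 150)) // 1.5: symmetric
	fmt.Println(qError(400, 100)) // 4: a poor estimate
}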
Although function costs differ based on the overload (due to // arguments), here we are using the minimum from similar functions based on // postgres' pg_proc table. The following query can be used to generate this table: -// SELECT proname, min(procost) FROM pg_proc WHERE proname LIKE 'st\_%' AND procost > 1 GROUP BY proname ORDER BY proname +// +// SELECT proname, min(procost) FROM pg_proc WHERE proname LIKE 'st\_%' AND procost > 1 GROUP BY proname ORDER BY proname +// // TODO(mjibson): Add costs directly to overloads. When that is done, we should // also add a test that ensures those costs match postgres. var fnCost = map[string]memo.Cost{ @@ -1442,17 +1444,17 @@ func (c *coster) rowScanCost( // rowBufferCost adds a cost for buffering rows according to a ramp function: // -// cost -// factor +// cost +// factor // -// | spillRowCount -// spillCostFactor _| ___________ _ _ _ -// | / -// | / -// | / -// 0 _| _ _ _________/______________________ row -// | count -// noSpillRowCount +// | spillRowCount +// spillCostFactor _| ___________ _ _ _ +// | / +// | / +// | / +// 0 _| _ _ _________/______________________ row +// | count +// noSpillRowCount // // This function models the fact that operators that buffer rows become more // expensive the more rows they need to buffer, since eventually they will need @@ -1500,42 +1502,42 @@ func (c *coster) largeCardinalityCostPenalty( // leaseholder preferences, with 0.0 indicating 0% and 1.0 indicating 100%. This // is the basic algorithm: // -// t = total # of locality tiers +// t = total # of locality tiers // -// Match each locality tier against the constraint set, and compute a value -// for each tier: +// Match each locality tier against the constraint set, and compute a value +// for each tier: // -// 0 = key not present in constraint set or key matches prohibited -// constraint, but value doesn't match -// +1 = key matches required constraint, and value does match -// -1 = otherwise +// 0 = key not present in constraint set or key matches prohibited +// constraint, but value doesn't match +// +1 = key matches required constraint, and value does match +// -1 = otherwise // -// m = length of longest locality prefix that ends in a +1 value and doesn't -// contain a -1 value. +// m = length of longest locality prefix that ends in a +1 value and doesn't +// contain a -1 value. 
// -// Compute "m" for both the ReplicaConstraints constraints set, as well as for -// the LeasePreferences constraints set: +// Compute "m" for both the ReplicaConstraints constraints set, as well as for +// the LeasePreferences constraints set: // -// constraint-score = m / t -// lease-pref-score = m / t +// constraint-score = m / t +// lease-pref-score = m / t // -// if there are no lease preferences, then final-score = lease-pref-score -// else final-score = (constraint-score * 2 + lease-pref-score) / 3 +// if there are no lease preferences, then final-score = lease-pref-score +// else final-score = (constraint-score * 2 + lease-pref-score) / 3 // // Here are some scoring examples: // -// Locality = region=us,dc=east -// 0.0 = [] // No constraints to match -// 0.0 = [+region=eu,+dc=uk] // None of the tiers match -// 0.0 = [+region=eu,+dc=east] // 2nd tier matches, but 1st tier doesn't -// 0.0 = [-region=us,+dc=east] // 1st tier matches PROHIBITED constraint -// 0.0 = [-region=eu] // 1st tier PROHIBITED and non-matching -// 0.5 = [+region=us] // 1st tier matches -// 0.5 = [+region=us,-dc=east] // 1st tier matches, 2nd tier PROHIBITED -// 0.5 = [+region=us,+dc=west] // 1st tier matches, but 2nd tier doesn't -// 1.0 = [+region=us,+dc=east] // Both tiers match -// 1.0 = [+dc=east] // 2nd tier matches, no constraints for 1st -// 1.0 = [+region=us,+dc=east,+rack=1,-ssd] // Extra constraints ignored +// Locality = region=us,dc=east +// 0.0 = [] // No constraints to match +// 0.0 = [+region=eu,+dc=uk] // None of the tiers match +// 0.0 = [+region=eu,+dc=east] // 2nd tier matches, but 1st tier doesn't +// 0.0 = [-region=us,+dc=east] // 1st tier matches PROHIBITED constraint +// 0.0 = [-region=eu] // 1st tier PROHIBITED and non-matching +// 0.5 = [+region=us] // 1st tier matches +// 0.5 = [+region=us,-dc=east] // 1st tier matches, 2nd tier PROHIBITED +// 0.5 = [+region=us,+dc=west] // 1st tier matches, but 2nd tier doesn't +// 1.0 = [+region=us,+dc=east] // Both tiers match +// 1.0 = [+dc=east] // 2nd tier matches, no constraints for 1st +// 1.0 = [+region=us,+dc=east,+rack=1,-ssd] // Extra constraints ignored // // Note that constraints need not be specified in any particular order, so all // constraints are scanned when matching each locality tier. In cases where diff --git a/pkg/sql/opt/xform/explorer.go b/pkg/sql/opt/xform/explorer.go index 8ad5b9ab952b..b6ffcf5cdeae 100644 --- a/pkg/sql/opt/xform/explorer.go +++ b/pkg/sql/opt/xform/explorer.go @@ -28,14 +28,14 @@ import ( // rule efficiently enumerates all possible combinations of its sub-expressions // in order to look for matches. For example: // -// // [AssociateJoin] -// (InnerJoin -// (InnerJoin $r:* $s:* $lowerOn:*) -// $t:* -// $upperOn:* -// ) -// => -// ... +// // [AssociateJoin] +// (InnerJoin +// (InnerJoin $r:* $s:* $lowerOn:*) +// $t:* +// $upperOn:* +// ) +// => +// ... // // Say the memo group containing the upper inner-join has 3 expressions in it, // and the memo group containing the lower inner-join has 4 expressions. 
Then @@ -57,22 +57,22 @@ import ( // For each expression combination that matches, a replace expression is // constructed and added to the same memo group as the matched expression: // -// // [AssociateJoin] -// (InnerJoin -// (InnerJoin $r:* $s:* $lowerOn:*) -// $t:* -// $upperOn:* -// ) -// => -// (InnerJoin -// (InnerJoin -// $r -// $t -// (ConstructFiltersNotUsing $s $lowerOn $upperOn) -// ) -// $s -// (ConstructFiltersUsing $s $lowerOn $upperOn) -// ) +// // [AssociateJoin] +// (InnerJoin +// (InnerJoin $r:* $s:* $lowerOn:*) +// $t:* +// $upperOn:* +// ) +// => +// (InnerJoin +// (InnerJoin +// $r +// $t +// (ConstructFiltersNotUsing $s $lowerOn $upperOn) +// ) +// $s +// (ConstructFiltersUsing $s $lowerOn $upperOn) +// ) // // In this example, if the upper and lower groups each contain two InnerJoin // expressions, then four new expressions will be added to the memo group of the @@ -113,16 +113,16 @@ func (e *explorer) init(o *Optimizer) { // pass. Each time exploreGroup is called, the end of the previous pass becomes // the start of the next pass. For example: // -// pass1 pass2 pass3 -// <-start -// e0 e0 e0 -// <-end <-start -// e1 (new) e1 e1 +// pass1 pass2 pass3 +// <-start +// e0 e0 e0 +// <-end <-start +// e1 (new) e1 e1 // -// e2 (new) e2 e2 -// <-end <-start -// e3 (new) e3 -// <-end +// e2 (new) e2 e2 +// <-end <-start +// e3 (new) e3 +// <-end // // For rules which match one or more sub-expressions in addition to the top- // level expression, there is extra complexity because every combination needs @@ -136,11 +136,11 @@ func (e *explorer) init(o *Optimizer) { // Optgen. Each non-scalar match pattern or sub-pattern uses a loop to // enumerate the expressions in the corresponding memo group. For example: // -// $join:(InnerJoin -// $left:(InnerJoin) -// $right:(Select) -// $on:* -// ) +// $join:(InnerJoin +// $left:(InnerJoin) +// $right:(Select) +// $on:* +// ) // // This match pattern would be implemented with 3 nested loops: 1 each for the // $join, $left, and $right memo groups. If $join had 2 expressions, $left had @@ -148,12 +148,11 @@ func (e *explorer) init(o *Optimizer) { // be considered. The innermost loop can skip iteration if all outer loops are // bound to expressions which have already been explored in previous passes: // -// for e1 in memo-exprs($join): -// for e2 in memo-exprs($left): -// for e3 in memo-exprs($right): -// if ordinal(e3) >= state.start: -// ... explore (e1, e2, e3) combo ... -// +// for e1 in memo-exprs($join): +// for e2 in memo-exprs($left): +// for e3 in memo-exprs($right): +// if ordinal(e3) >= state.start: +// ... explore (e1, e2, e3) combo ... func (e *explorer) exploreGroup(grp memo.RelExpr) *exploreState { // Do nothing if this group has already been fully explored. state := e.ensureExploreState(grp) diff --git a/pkg/sql/opt/xform/general_funcs.go b/pkg/sql/opt/xform/general_funcs.go index f7e7a05f17a8..47dfec651627 100644 --- a/pkg/sql/opt/xform/general_funcs.go +++ b/pkg/sql/opt/xform/general_funcs.go @@ -157,14 +157,15 @@ func (c *CustomFuncs) remapJoinColsInScalarExpr( // indexes to be constrained and used. 
Consider the following example: // // CREATE TABLE abc ( -// a INT PRIMARY KEY, -// b INT NOT NULL, -// c STRING NOT NULL, -// CHECK (a < 10 AND a > 1), -// CHECK (b < 10 AND b > 1), -// CHECK (c in ('first', 'second')), -// INDEX secondary (b, a), -// INDEX tertiary (c, b, a)) +// +// a INT PRIMARY KEY, +// b INT NOT NULL, +// c STRING NOT NULL, +// CHECK (a < 10 AND a > 1), +// CHECK (b < 10 AND b > 1), +// CHECK (c in ('first', 'second')), +// INDEX secondary (b, a), +// INDEX tertiary (c, b, a)) // // Now consider the query: SELECT a, b WHERE a > 5 // @@ -174,14 +175,15 @@ func (c *CustomFuncs) remapJoinColsInScalarExpr( // indexes. In fact, for the above query we can do the following: // // select -// ├── columns: a:1(int!null) b:2(int!null) -// ├── scan abc@tertiary -// │ ├── columns: a:1(int!null) b:2(int!null) -// │ └── constraint: /3/2/1: [/'first'/2/6 - /'first'/9/9] [/'second'/2/6 - /'second'/9/9] -// └── filters -// └── gt [type=bool] -// ├── variable: a [type=int] -// └── const: 5 [type=int] +// +// ├── columns: a:1(int!null) b:2(int!null) +// ├── scan abc@tertiary +// │ ├── columns: a:1(int!null) b:2(int!null) +// │ └── constraint: /3/2/1: [/'first'/2/6 - /'first'/9/9] [/'second'/2/6 - /'second'/9/9] +// └── filters +// └── gt [type=bool] +// ├── variable: a [type=int] +// └── const: 5 [type=int] // // Similarly, the secondary index could also be used. All such index scans // will be added to the memo group. @@ -236,10 +238,10 @@ func (c *CustomFuncs) initIdxConstraintForIndex( // computed column expressions from the given table. A computed column can be // used as a filter when it has a constant value. That is true when: // -// 1. All other columns it references are constant, because other filters in -// the query constrain them to be so. -// 2. All functions in the computed column expression can be folded into -// constants (i.e. they do not have problematic side effects). +// 1. All other columns it references are constant, because other filters in +// the query constrain them to be so. +// 2. All functions in the computed column expression can be folded into +// constants (i.e. they do not have problematic side effects). // // Note that computed columns can depend on other computed columns; in general // the dependencies form an acyclic directed graph. computedColFilters will @@ -251,13 +253,13 @@ func (c *CustomFuncs) initIdxConstraintForIndex( // filters may allow some indexes to be constrained and used. Consider the // following example: // -// CREATE TABLE t ( -// k INT NOT NULL, -// hash INT AS (k % 4) STORED, -// PRIMARY KEY (hash, k) -// ) +// CREATE TABLE t ( +// k INT NOT NULL, +// hash INT AS (k % 4) STORED, +// PRIMARY KEY (hash, k) +// ) // -// SELECT * FROM t WHERE k = 5 +// SELECT * FROM t WHERE k = 5 // // Notice that the filter provided explicitly wouldn't allow the optimizer to // seek using the primary index (it would have to fall back to a table scan). @@ -265,11 +267,11 @@ func (c *CustomFuncs) initIdxConstraintForIndex( // it's dependent on column "k", which has the constant value of 5. This enables // usage of the primary index: // -// scan t -// ├── columns: k:1(int!null) hash:2(int!null) -// ├── constraint: /2/1: [/1/5 - /1/5] -// ├── key: (2) -// └── fd: ()-->(1) +// scan t +// ├── columns: k:1(int!null) hash:2(int!null) +// ├── constraint: /2/1: [/1/5 - /1/5] +// ├── key: (2) +// └── fd: ()-->(1) // // The values of both columns in that index are known, enabling a single value // constraint to be generated. 
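The derivation above is purely arithmetic once "k" is known to be constant: hash = 5 % 4 = 1. Below is a minimal Go sketch of that folding step, using a plain map keyed by column name as a stand-in for the optimizer's constColsMap and handling only the modulo case; constCols and foldMod are hypothetical names used for illustration, not the optimizer's API.

    package main

    import "fmt"

    // constCols maps column names to already-proven constant values; a stand-in
    // for the optimizer's constColsMap, keyed by name instead of column ID.
    type constCols map[string]int64

    // foldMod folds a computed column defined as (col % divisor) STORED into a
    // constant, provided col itself has a known constant value.
    func foldMod(consts constCols, col string, divisor int64) (int64, bool) {
        v, ok := consts[col]
        if !ok || divisor == 0 {
            return 0, false
        }
        return v % divisor, true
    }

    func main() {
        // WHERE k = 5 proves k constant; hash is defined as (k % 4) STORED.
        consts := constCols{"k": 5}
        if hash, ok := foldMod(consts, "k", 4); ok {
            // The derived filter hash = 1 is what lets the scan constrain the
            // (hash, k) primary index.
            fmt.Printf("derived filter: hash = %d\n", hash)
        }
    }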
@@ -314,16 +316,17 @@ func (c *CustomFuncs) computedColFilters( // logically equal but not identical values, like the decimals 1.0 and 1.00. // // For example: -// CREATE TABLE t ( -// d DECIMAL, -// c DECIMAL AS (d*10) STORED -// ); -// INSERT INTO t VALUES (1.0), (1.00), (1.000); -// SELECT c::STRING FROM t WHERE d=1; -// ---- -// 10.0 -// 10.00 -// 10.000 +// +// CREATE TABLE t ( +// d DECIMAL, +// c DECIMAL AS (d*10) STORED +// ); +// INSERT INTO t VALUES (1.0), (1.00), (1.000); +// SELECT c::STRING FROM t WHERE d=1; +// ---- +// 10.0 +// 10.00 +// 10.000 // // We can infer that c has a constant value of 1 but we can't replace it with 1 // in any expression. @@ -334,7 +337,7 @@ type constColsMap map[opt.ColumnID]opt.ScalarExpr // given lists of filters and finding expressions that constrain columns to a // single constant value. For example: // -// x = 5 AND y = 'foo' +// x = 5 AND y = 'foo' // // This would add a mapping from x => 5 and y => 'foo', which constants can // then be used to prove that dependent computed columns are also constant. @@ -753,7 +756,6 @@ func (c *CustomFuncs) wrapScanInLimitedSelect( // keyLength: 1, // => // hasSequence: False, reverse: False -// func indexHasOrderingSequence( md *opt.Metadata, scan memo.RelExpr, diff --git a/pkg/sql/opt/xform/index_scan_builder.go b/pkg/sql/opt/xform/index_scan_builder.go index bcc3b2789d00..c57a4caf822f 100644 --- a/pkg/sql/opt/xform/index_scan_builder.go +++ b/pkg/sql/opt/xform/index_scan_builder.go @@ -23,20 +23,19 @@ import ( // Any filters are created as close to the scan as possible, and index joins can // be used to scan a non-covering index. For example, in order to construct: // -// (IndexJoin -// (Select (Scan $scanPrivate) $filters) -// $indexJoinPrivate -// ) +// (IndexJoin +// (Select (Scan $scanPrivate) $filters) +// $indexJoinPrivate +// ) // // make the following calls: // -// var sb indexScanBuilder -// sb.Init(c, tabID) -// sb.SetScan(scanPrivate) -// sb.AddSelect(filters) -// sb.AddIndexJoin(cols) -// expr := sb.Build() -// +// var sb indexScanBuilder +// sb.Init(c, tabID) +// sb.SetScan(scanPrivate) +// sb.AddSelect(filters) +// sb.AddIndexJoin(cols) +// expr := sb.Build() type indexScanBuilder struct { c *CustomFuncs f *norm.Factory diff --git a/pkg/sql/opt/xform/join_funcs.go b/pkg/sql/opt/xform/join_funcs.go index 5c6d5cb86c29..8ff5e46168c1 100644 --- a/pkg/sql/opt/xform/join_funcs.go +++ b/pkg/sql/opt/xform/join_funcs.go @@ -157,11 +157,10 @@ func (c *CustomFuncs) GenerateMergeJoins( // 1. The index has all the columns we need; this is the simple case, where we // generate a LookupJoin expression in the current group: // -// Join LookupJoin(t@idx) -// / \ | -// / \ -> | -// Input Scan(t) Input -// +// Join LookupJoin(t@idx) +// / \ | +// / \ -> | +// Input Scan(t) Input // // 2. The index is not covering, but we can fully evaluate the ON condition // using the index, or we are doing an InnerJoin. We have to generate @@ -170,18 +169,18 @@ func (c *CustomFuncs) GenerateMergeJoins( // columns from one table, whereas we also need to output columns from // Input. 
// -// Join LookupJoin(t@primary) -// / \ | -// / \ -> | -// Input Scan(t) LookupJoin(t@idx) -// | -// | -// Input +// Join LookupJoin(t@primary) +// / \ | +// / \ -> | +// Input Scan(t) LookupJoin(t@idx) +// | +// | +// Input // // For example: -// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) -// CREATE TABLE xyz (x INT PRIMARY KEY, y INT, z INT, INDEX (y)) -// SELECT * FROM abc JOIN xyz ON a=y +// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) +// CREATE TABLE xyz (x INT PRIMARY KEY, y INT, z INT, INDEX (y)) +// SELECT * FROM abc JOIN xyz ON a=y // // We want to first join abc with the index on y (which provides columns y, x) // and then use a lookup join to retrieve column z. The "index join" (top @@ -201,7 +200,7 @@ func (c *CustomFuncs) GenerateMergeJoins( // behave accordingly. // // For example, using the same tables in the example for case 2: -// SELECT * FROM abc LEFT JOIN xyz ON a=y AND b=z +// SELECT * FROM abc LEFT JOIN xyz ON a=y AND b=z // // The first join will evaluate a=y and produce columns a,b,c,x,y,cont // where cont is the continuation column used to group together rows that @@ -209,7 +208,6 @@ func (c *CustomFuncs) GenerateMergeJoins( // the primary index, evaluate b=z, and produce columns a,b,c,x,y,z. A // similar approach works for anti-joins and semi-joins. // -// // A lookup join can be created when the ON condition or implicit filters from // CHECK constraints and computed columns constrain a prefix of the index // columns to non-ranging constant values. To support this, the constant values @@ -218,37 +216,36 @@ func (c *CustomFuncs) GenerateMergeJoins( // // For example, consider the tables and query below. // -// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) -// CREATE TABLE xyz ( -// x INT PRIMARY KEY, -// y INT, -// z INT NOT NULL, -// CHECK z IN (1, 2, 3), -// INDEX (z, y) -// ) -// SELECT a, x FROM abc JOIN xyz ON a=y +// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) +// CREATE TABLE xyz ( +// x INT PRIMARY KEY, +// y INT, +// z INT NOT NULL, +// CHECK z IN (1, 2, 3), +// INDEX (z, y) +// ) +// SELECT a, x FROM abc JOIN xyz ON a=y // // GenerateLookupJoins will perform the following transformation. // -// Join LookupJoin(t@idx) -// / \ | -// / \ -> | -// Input Scan(t) Join -// / \ -// / \ -// Input Values(1, 2, 3) +// Join LookupJoin(t@idx) +// / \ | +// / \ -> | +// Input Scan(t) Join +// / \ +// / \ +// Input Values(1, 2, 3) // // If a column is constrained to a single constant value, inlining normalization // rules will reduce the cross join into a project. // -// Join LookupJoin(t@idx) -// / \ | -// / \ -> | -// Input Scan(t) Project -// | -// | -// Input -// +// Join LookupJoin(t@idx) +// / \ | +// / \ -> | +// Input Scan(t) Project +// | +// | +// Input func (c *CustomFuncs) GenerateLookupJoins( grp memo.RelExpr, joinType opt.Operator, @@ -279,19 +276,19 @@ func (c *CustomFuncs) GenerateLookupJoins( // // For example: // -// Join LookupJoin(t@idx) -// / \ | -// / \ -> | -// Input Project Input -// | -// | -// Scan(t) +// Join LookupJoin(t@idx) +// / \ | +// / \ -> | +// Input Project Input +// | +// | +// Scan(t) // // This function and its associated rule currently require that: // -// 1. The join is an inner join. -// 2. The right side projects only virtual computed columns. -// 3. All the projected virtual columns are covered by a single index. +// 1. The join is an inner join. +// 2. The right side projects only virtual computed columns. +// 3. All the projected virtual columns are covered by a single index. 
// // It should be possible to support semi- and anti- joins. Left joins may be // possible with additional complexity. diff --git a/pkg/sql/opt/xform/join_order_builder.go b/pkg/sql/opt/xform/join_order_builder.go index 9be542c616e8..f1bd07fabe41 100644 --- a/pkg/sql/opt/xform/join_order_builder.go +++ b/pkg/sql/opt/xform/join_order_builder.go @@ -82,16 +82,16 @@ type OnAddJoinFunc func(left, right, all, joinRefs, selectRefs []memo.RelExpr, o // tuples, where |m| is left input cardinality and |n| is right input // cardinality. With a query like this: // -// SELECT * -// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) -// INNER JOIN uv ON x = u +// SELECT * +// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) +// INNER JOIN uv ON x = u // // An ordering like the following is valid but not desirable, since the cross // join will likely be very expensive compared to a join with a predicate: // -// SELECT * -// FROM (SELECT * FROM uv INNER JOIN ab ON True) -// INNER JOIN xy ON x = a AND x = u +// SELECT * +// FROM (SELECT * FROM uv INNER JOIN ab ON True) +// INNER JOIN xy ON x = a AND x = u // // Avoiding cross joins significantly decreases the search space (and therefore // planning time) without preventing the best plan from being found in most @@ -113,16 +113,16 @@ type OnAddJoinFunc func(left, right, all, joinRefs, selectRefs []memo.RelExpr, o // // Taking this query as an example: // -// SELECT * -// FROM (SELECT * FROM xy LEFT JOIN ab ON x = a) -// INNER JOIN uv ON x = u AND (y = b OR b IS NULL) +// SELECT * +// FROM (SELECT * FROM xy LEFT JOIN ab ON x = a) +// INNER JOIN uv ON x = u AND (y = b OR b IS NULL) // // The vertexes of the graph would represent the base relations xy, ab and uv. // The three edges would be: // -// x = a [left] -// x = u [inner] -// y = b OR b IS NULL [inner] +// x = a [left] +// x = u [inner] +// y = b OR b IS NULL [inner] // // Then, the DPSube algorithm is executed (see citations: [8]). DPSube // enumerates all disjoint pairs of subsets of base relations such as @@ -145,10 +145,10 @@ type OnAddJoinFunc func(left, right, all, joinRefs, selectRefs []memo.RelExpr, o // contained in the SES of a join must be present in the join's input. For // example, take the following query: // -// SELECT * -// FROM xy -// LEFT JOIN (SELECT * FROM ab INNER JOIN uv ON a = u) -// ON x = u +// SELECT * +// FROM xy +// LEFT JOIN (SELECT * FROM ab INNER JOIN uv ON a = u) +// ON x = u // // The SES for the left join will contain relations xy and uv because both are // referenced by the join's predicate. Therefore, both must be in the input of @@ -166,10 +166,10 @@ type OnAddJoinFunc func(left, right, all, joinRefs, selectRefs []memo.RelExpr, o // // Consider the following (invalid) reordering of the above example): // -// SELECT * -// FROM ab -// INNER JOIN (SELECT * FROM xy LEFT JOIN uv ON x = u) -// ON a = u +// SELECT * +// FROM ab +// INNER JOIN (SELECT * FROM xy LEFT JOIN uv ON x = u) +// ON a = u // // The left join's TES will include relations xy and uv because they are in the // SES. The TES will also contain ab because the right-asscom property does not @@ -195,9 +195,9 @@ type OnAddJoinFunc func(left, right, all, joinRefs, selectRefs []memo.RelExpr, o // their original operator, free to be combined with conjuncts from other inner // joins. 
For example, take this query: // -// SELECT * -// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) -// INNER JOIN uv ON x = u AND a = u +// SELECT * +// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) +// INNER JOIN uv ON x = u AND a = u // // Treating the ON conditions of these joins as a conglomerate (as we do with // non-inner joins), a join between base relations xy and uv would not be @@ -206,37 +206,38 @@ type OnAddJoinFunc func(left, right, all, joinRefs, selectRefs []memo.RelExpr, o // conjunct solves this problem, allowing a reordering like the following // (the ab and uv relations are switched, along with the filters): // -// SELECT * -// FROM (SELECT * FROM xy INNER JOIN uv ON x = u) -// INNER JOIN ab ON x = a AND a = u +// SELECT * +// FROM (SELECT * FROM xy INNER JOIN uv ON x = u) +// INNER JOIN ab ON x = a AND a = u // // In fact, this idea can be taken even further. Take this query as an example: // -// SELECT * -// FROM xy -// INNER JOIN (SELECT * FROM ab LEFT JOIN uv ON b = v) -// ON x = a AND (y = u OR u IS NULL) +// SELECT * +// FROM xy +// INNER JOIN (SELECT * FROM ab LEFT JOIN uv ON b = v) +// ON x = a AND (y = u OR u IS NULL) // // The following is a valid reformulation: // -// SELECT * -// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) -// LEFT JOIN uv ON b = v -// WHERE y = u OR u IS NULL +// SELECT * +// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) +// LEFT JOIN uv ON b = v +// WHERE y = u OR u IS NULL // // Notice the new Select operation that now carries the inner join conjunct that // references the right side of the left join. We can model the process that // leads to this reformulation as follows: -// 1. The inner join is rewritten as a cross join and two selects, each -// carrying a conjunct: (x = a) for one and (y = u OR u IS NULL) for the -// other. -// 2. The Select operators are pulled above the inner join. -// 3. The left join and inner join are reordered according to the associative -// property (see citations: [8] table 2). -// 4. Finally, the inner join conjuncts are pushed back down the reordered -// join tree as far as possible. The x = a conjunct can be pushed to the -// inner join, but the (y = u OR u IS NULL) conjunct must remain on the -// Select. +// 1. The inner join is rewritten as a cross join and two selects, each +// carrying a conjunct: (x = a) for one and (y = u OR u IS NULL) for the +// other. +// 2. The Select operators are pulled above the inner join. +// 3. The left join and inner join are reordered according to the associative +// property (see citations: [8] table 2). +// 4. Finally, the inner join conjuncts are pushed back down the reordered +// join tree as far as possible. The x = a conjunct can be pushed to the +// inner join, but the (y = u OR u IS NULL) conjunct must remain on the +// Select. +// // JoinOrderBuilder is able to effect this transformation (though it is not // accomplished in so many steps). // @@ -258,9 +259,9 @@ type OnAddJoinFunc func(left, right, all, joinRefs, selectRefs []memo.RelExpr, o // we can add new edges that are implied by the transitive closure of the inner // join edges. For example, take this query: // -// SELECT * FROM xy -// INNER JOIN ab ON x = a -// INNER JOIN uv ON a = u +// SELECT * FROM xy +// INNER JOIN ab ON x = a +// INNER JOIN uv ON a = u // // The two edges x = a and a = u are explicit in this join tree. However, there // is the additional implicit edge x = u which can be added to the join graph. 
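The implied edge x = u is the transitive closure of the inner-join equality edges, which amounts to grouping equated columns into equivalence classes. A small sketch of that closure step over column names, using a tiny union-find; this is illustrative only and is not the JoinOrderBuilder's actual implementation.

    package main

    import "fmt"

    // find returns the representative of x, compressing paths as it goes.
    func find(parent map[string]string, x string) string {
        for parent[x] != x {
            parent[x] = parent[parent[x]]
            x = parent[x]
        }
        return x
    }

    func main() {
        // Explicit inner-join equality edges from the query above: x = a, a = u.
        cols := []string{"x", "a", "u"}
        edges := [][2]string{{"x", "a"}, {"a", "u"}}

        parent := make(map[string]string)
        for _, c := range cols {
            parent[c] = c
        }
        for _, e := range edges {
            parent[find(parent, e[0])] = find(parent, e[1])
        }

        // Columns sharing a representative are transitively equal; any such pair
        // that is not already an explicit edge is an implied edge.
        explicit := make(map[[2]string]bool)
        for _, e := range edges {
            explicit[e] = true
            explicit[[2]string{e[1], e[0]}] = true
        }
        for i := 0; i < len(cols); i++ {
            for j := i + 1; j < len(cols); j++ {
                a, b := cols[i], cols[j]
                if find(parent, a) == find(parent, b) && !explicit[[2]string{a, b}] {
                    fmt.Printf("implied edge: %s = %s\n", a, b) // prints: implied edge: x = u
                }
            }
        }
    }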
@@ -438,7 +439,7 @@ func (jb *JoinOrderBuilder) populateGraph(rel memo.RelExpr) (vertexSet, edgeSet) // reflect the transitive closure of all equality filters between columns. // As an example, take a query like the following: // -// SELECT * FROM xy INNER JOIN ab ON x = a INNER JOIN uv ON u = a +// SELECT * FROM xy INNER JOIN ab ON x = a INNER JOIN uv ON u = a // // Contains the explicit edges x = a and u = a, and the implicit edge x = u. // This implicit edge will be added by ensureClosure. @@ -1038,9 +1039,9 @@ type operator struct { // 'to' set must be a subset of the input relations (from -> to). Take the // following query as an example: // -// SELECT * FROM xy -// INNER JOIN (SELECT * FROM ab LEFT JOIN uv ON a = u) -// ON x = a +// SELECT * FROM xy +// INNER JOIN (SELECT * FROM ab LEFT JOIN uv ON a = u) +// ON x = a // // During execution of the CD-C algorithm, the following conflict rule would // be added to inner join edge: [uv -> ab]. This means that, for any join that @@ -1071,19 +1072,19 @@ func (e *edge) calcNullRejectedRels(jb *JoinOrderBuilder) { // a join uses a predicate in its ON condition, all relations in the SES must be // part of the join's inputs. For example, in this query: // -// SELECT * -// FROM xy -// INNER JOIN (SELECT * FROM ab INNER JOIN uv ON b = (u*2)) -// ON x = a +// SELECT * +// FROM xy +// INNER JOIN (SELECT * FROM ab INNER JOIN uv ON b = (u*2)) +// ON x = a // // The SES for the x = a edge would contain relations xy and ab. The SES for the // b = u*2 edge would contain ab and uv. Therefore, this query could be // reordered like so: // -// SELECT * -// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) -// INNER JOIN uv -// ON b = (u*2) +// SELECT * +// FROM (SELECT * FROM xy INNER JOIN ab ON x = a) +// INNER JOIN uv +// ON b = (u*2) // // While still satisfying the syntactic eligibility sets of the edges. func (e *edge) calcSES(jb *JoinOrderBuilder) { @@ -1368,21 +1369,20 @@ func commute(op opt.Operator) bool { // by the given edges are associative with each other. An example of an // application of the associative property: // -// SELECT * FROM -// ( -// SELECT * FROM xy -// INNER JOIN ab ON x = a -// ) -// INNER JOIN uv ON a = u -// => -// SELECT * FROM xy -// INNER JOIN -// ( -// SELECT * FROM ab -// INNER JOIN uv ON a = u -// ) -// ON x = a -// +// SELECT * FROM +// ( +// SELECT * FROM xy +// INNER JOIN ab ON x = a +// ) +// INNER JOIN uv ON a = u +// => +// SELECT * FROM xy +// INNER JOIN +// ( +// SELECT * FROM ab +// INNER JOIN uv ON a = u +// ) +// ON x = a func assoc(edgeA, edgeB *edge) bool { return checkProperty(assocTable, edgeA, edgeB) } @@ -1391,20 +1391,19 @@ func assoc(edgeA, edgeB *edge) bool { // described by the given edges allow the left-asscom property. An example of // an application of the left-asscom property: // -// SELECT * FROM -// ( -// SELECT * FROM xy -// INNER JOIN ab ON x = a -// ) -// INNER JOIN uv ON x = u -// => -// SELECT * FROM -// ( -// SELECT * FROM xy -// INNER JOIN uv ON x = u -// ) -// INNER JOIN ab ON x = a -// +// SELECT * FROM +// ( +// SELECT * FROM xy +// INNER JOIN ab ON x = a +// ) +// INNER JOIN uv ON x = u +// => +// SELECT * FROM +// ( +// SELECT * FROM xy +// INNER JOIN uv ON x = u +// ) +// INNER JOIN ab ON x = a func leftAsscom(edgeA, edgeB *edge) bool { return checkProperty(leftAsscomTable, edgeA, edgeB) } @@ -1413,22 +1412,21 @@ func leftAsscom(edgeA, edgeB *edge) bool { // described by the given edges allow the right-asscom property. 
An example of // an application of the right-asscom property: // -// SELECT * FROM uv -// INNER JOIN -// ( -// SELECT * FROM xy -// INNER JOIN ab ON x = a -// ) -// ON a = u -// => -// SELECT * FROM xy -// INNER JOIN -// ( -// SELECT * FROM uv -// INNER JOIN ab ON a = u -// ) -// ON x = a -// +// SELECT * FROM uv +// INNER JOIN +// ( +// SELECT * FROM xy +// INNER JOIN ab ON x = a +// ) +// ON a = u +// => +// SELECT * FROM xy +// INNER JOIN +// ( +// SELECT * FROM uv +// INNER JOIN ab ON a = u +// ) +// ON x = a func rightAsscom(edgeA, edgeB *edge) bool { return checkProperty(rightAsscomTable, edgeA, edgeB) } diff --git a/pkg/sql/opt/xform/limit_funcs.go b/pkg/sql/opt/xform/limit_funcs.go index 362c807b2a07..a4ed36613265 100644 --- a/pkg/sql/opt/xform/limit_funcs.go +++ b/pkg/sql/opt/xform/limit_funcs.go @@ -41,8 +41,9 @@ func (c *CustomFuncs) LimitScanPrivate( // Scan operator. // // NOTE: Limiting unconstrained, non-partial index scans is done by the -// GenerateLimitedScans rule, since that can require IndexJoin operators -// to be generated. +// +// GenerateLimitedScans rule, since that can require IndexJoin operators +// to be generated. func (c *CustomFuncs) CanLimitFilteredScan( scanPrivate *memo.ScanPrivate, required props.OrderingChoice, ) bool { diff --git a/pkg/sql/opt/xform/memo_format.go b/pkg/sql/opt/xform/memo_format.go index 8de29ab6e930..5e1260dcd0cb 100644 --- a/pkg/sql/opt/xform/memo_format.go +++ b/pkg/sql/opt/xform/memo_format.go @@ -216,7 +216,8 @@ func (mf *memoFormatter) populateStates() { } // formatGroup prints out (to mf.buf) all members of the group); e.g: -// (limit G2 G3 ordering=-1) (scan a,rev,cols=(1,3),lim=10(rev)) +// +// (limit G2 G3 ordering=-1) (scan a,rev,cols=(1,3),lim=10(rev)) func (mf *memoFormatter) formatGroup(first memo.RelExpr) { for member := first; member != nil; member = member.NextExpr() { if member != first { @@ -227,7 +228,8 @@ func (mf *memoFormatter) formatGroup(first memo.RelExpr) { } // formatExpr prints out (to mf.buf) a single expression; e.g: -// (filters G6 G7) +// +// (filters G6 G7) func (mf *memoFormatter) formatExpr(e opt.Expr) { fmt.Fprintf(mf.buf, "(%s", e.Op()) for i := 0; i < e.ChildCount(); i++ { diff --git a/pkg/sql/opt/xform/optimizer.go b/pkg/sql/opt/xform/optimizer.go index c10404c0f80a..6745c0e77d39 100644 --- a/pkg/sql/opt/xform/optimizer.go +++ b/pkg/sql/opt/xform/optimizer.go @@ -315,17 +315,17 @@ func (o *Optimizer) optimizeExpr( // The following is a simplified walkthrough of how the optimizer might handle // the following SQL query: // -// SELECT * FROM a WHERE x=1 ORDER BY y +// SELECT * FROM a WHERE x=1 ORDER BY y // // Before the optimizer is invoked, the memo group contains a single normalized // expression: // -// memo -// ├── G1: (select G2 G3) -// ├── G2: (scan a) -// ├── G3: (eq 3 2) -// ├── G4: (variable x) -// └── G5: (const 1) +// memo +// ├── G1: (select G2 G3) +// ├── G2: (scan a) +// ├── G3: (eq 3 2) +// ├── G4: (variable x) +// └── G5: (const 1) // // Optimization begins at the root of the memo (group #1), and calls // optimizeGroup with the properties required of that group ("ordering:y"). @@ -345,32 +345,32 @@ func (o *Optimizer) optimizeExpr( // cost expression for that group for that set of properties (i.e. the empty // set). 
// -// memo -// ├── G1: (select G2 G3) -// ├── G2: (scan a) -// │ └── [] -// │ ├── best: (scan a) -// │ └── cost: 100.00 -// ├── G3: (eq 3 2) -// ├── G4: (variable x) -// └── G5: (const 1) +// memo +// ├── G1: (select G2 G3) +// ├── G2: (scan a) +// │ └── [] +// │ ├── best: (scan a) +// │ └── cost: 100.00 +// ├── G3: (eq 3 2) +// ├── G4: (variable x) +// └── G5: (const 1) // // The recursion pops up a level, and now the Sort enforcer knows its input, // and so it too can be costed (cost of input + extra cost of sort) and added // as the best expression for the property set with the ordering requirement. // -// memo -// ├── G1: (select G2 G3) -// ├── G2: (scan a) -// │ ├── [ordering: y] -// │ │ ├── best: (sort G2) -// │ │ └── cost: 150.00 -// │ └── [] -// │ ├── best: (scan a) -// │ └── cost: 100.00 -// ├── G3: (eq 3 2) -// ├── G4: (variable x) -// └── G5: (const 1) +// memo +// ├── G1: (select G2 G3) +// ├── G2: (scan a) +// │ ├── [ordering: y] +// │ │ ├── best: (sort G2) +// │ │ └── cost: 150.00 +// │ └── [] +// │ ├── best: (scan a) +// │ └── cost: 100.00 +// ├── G3: (eq 3 2) +// ├── G4: (variable x) +// └── G5: (const 1) // // Recursion pops up another level, and the Select operator now knows its input // (the Sort of the Scan). It then moves on to its scalar filter child and @@ -381,21 +381,21 @@ func (o *Optimizer) optimizeExpr( // ordering requirement. It requires the same ordering requirement from its // input child (i.e. the scan). // -// memo -// ├── G1: (select G2 G3) -// │ └── [ordering: y] -// │ ├── best: (select G2="ordering: y" G3) -// │ └── cost: 160.00 -// ├── G2: (scan a) -// │ ├── [ordering: y] -// │ │ ├── best: (sort G2) -// │ │ └── cost: 150.00 -// │ └── [] -// │ ├── best: (scan a) -// │ └── cost: 100.00 -// ├── G3: (eq 3 2) -// ├── G4: (variable x) -// └── G5: (const 1) +// memo +// ├── G1: (select G2 G3) +// │ └── [ordering: y] +// │ ├── best: (select G2="ordering: y" G3) +// │ └── cost: 160.00 +// ├── G2: (scan a) +// │ ├── [ordering: y] +// │ │ ├── best: (sort G2) +// │ │ └── cost: 150.00 +// │ └── [] +// │ ├── best: (scan a) +// │ └── cost: 100.00 +// ├── G3: (eq 3 2) +// ├── G4: (variable x) +// └── G5: (const 1) // // But the process is not yet complete. After traversing the Select child // groups, optimizeExpr generates an alternate plan that satisfies the ordering @@ -407,24 +407,24 @@ func (o *Optimizer) optimizeExpr( // returns them immediately with no extra work. The Select expression is now // costed and added as the best expression without an ordering requirement. // -// memo -// ├── G1: (select G2 G3) -// │ ├── [ordering: y] -// │ │ ├── best: (select G2="ordering: y" G3) -// │ │ └── cost: 160.00 -// │ └── [] -// │ ├── best: (select G2 G3) -// │ └── cost: 110.00 -// ├── G2: (scan a) -// │ ├── [ordering: y] -// │ │ ├── best: (sort G2) -// │ │ └── cost: 150.00 -// │ └── [] -// │ ├── best: (scan a) -// │ └── cost: 100.00 -// ├── G3: (eq 3 2) -// ├── G4: (variable x) -// └── G5: (const 1) +// memo +// ├── G1: (select G2 G3) +// │ ├── [ordering: y] +// │ │ ├── best: (select G2="ordering: y" G3) +// │ │ └── cost: 160.00 +// │ └── [] +// │ ├── best: (select G2 G3) +// │ └── cost: 110.00 +// ├── G2: (scan a) +// │ ├── [ordering: y] +// │ │ ├── best: (sort G2) +// │ │ └── cost: 150.00 +// │ └── [] +// │ ├── best: (scan a) +// │ └── cost: 100.00 +// ├── G3: (eq 3 2) +// ├── G4: (variable x) +// └── G5: (const 1) // // Finally, the Sort enforcer for group #1 has its input and can be costed. 
But // rather than costing 50.0 like the other Sort enforcer, this one only costs @@ -432,39 +432,38 @@ func (o *Optimizer) optimizeExpr( // cost is only 111.0, which makes it the new best expression for group #1 with // an ordering requirement: // -// memo -// ├── G1: (select G2 G3) -// │ ├── [ordering: y] -// │ │ ├── best: (sort G1) -// │ │ └── cost: 111.00 -// │ └── [] -// │ ├── best: (select G2 G3) -// │ └── cost: 110.00 -// ├── G2: (scan a) -// │ ├── [ordering: y] -// │ │ ├── best: (sort G2) -// │ │ └── cost: 150.00 -// │ └── [] -// │ ├── best: (scan a) -// │ └── cost: 100.00 -// ├── G3: (eq 3 2) -// ├── G4: (variable x) -// └── G5: (const 1) +// memo +// ├── G1: (select G2 G3) +// │ ├── [ordering: y] +// │ │ ├── best: (sort G1) +// │ │ └── cost: 111.00 +// │ └── [] +// │ ├── best: (select G2 G3) +// │ └── cost: 110.00 +// ├── G2: (scan a) +// │ ├── [ordering: y] +// │ │ ├── best: (sort G2) +// │ │ └── cost: 150.00 +// │ └── [] +// │ ├── best: (scan a) +// │ └── cost: 100.00 +// ├── G3: (eq 3 2) +// ├── G4: (variable x) +// └── G5: (const 1) // // Now the memo has been fully optimized, and the best expression for group #1 // and "ordering:y" can be set as the root of the tree by setLowestCostTree: // -// sort -// ├── columns: x:1(int) y:2(int) -// ├── ordering: +2 -// └── select -// ├── columns: x:1(int) y:2(int) -// ├── scan -// │ └── columns: x:1(int) y:2(int) -// └── eq [type=bool] -// ├── variable: a.x [type=int] -// └── const: 1 [type=int] -// +// sort +// ├── columns: x:1(int) y:2(int) +// ├── ordering: +2 +// └── select +// ├── columns: x:1(int) y:2(int) +// ├── scan +// │ └── columns: x:1(int) y:2(int) +// └── eq [type=bool] +// ├── variable: a.x [type=int] +// └── const: 1 [type=int] func (o *Optimizer) optimizeGroup(grp memo.RelExpr, required *physical.Required) *groupState { // Always start with the first expression in the group. grp = grp.FirstExpr() @@ -616,12 +615,12 @@ func (o *Optimizer) optimizeScalarExpr( // been provided by an enforcer rather than by the expression itself. There are // two reasons why this is necessary/desirable: // -// 1. The expression may not be able to provide the property on its own. For -// example, a hash join cannot provide ordered results. -// 2. The enforcer might be able to provide the property at lower overall -// cost. For example, an enforced sort on top of a hash join might be -// lower cost than a merge join that is already sorted, but at the cost of -// requiring one of its children to be sorted. +// 1. The expression may not be able to provide the property on its own. For +// example, a hash join cannot provide ordered results. +// 2. The enforcer might be able to provide the property at lower overall +// cost. For example, an enforced sort on top of a hash join might be +// lower cost than a merge join that is already sorted, but at the cost of +// requiring one of its children to be sorted. // // Note that enforceProps will recursively optimize this same group, but with // one less required physical property. 
The recursive call will eventually make @@ -710,15 +709,15 @@ func (o *Optimizer) shouldExplore(required *physical.Required) bool { // the memo, with a normalized tree containing the first expression in each of // the groups: // -// memo -// ├── G1: (inner-join G2 G3 G4) (inner-join G3 G2 G4) -// ├── G2: (scan a) -// ├── G3: (select G5 G6) (scan b,constrained) -// ├── G4: (true) -// ├── G5: (scan b) -// ├── G6: (eq G7 G8) -// ├── G7: (variable b.x) -// └── G8: (const 1) +// memo +// ├── G1: (inner-join G2 G3 G4) (inner-join G3 G2 G4) +// ├── G2: (scan a) +// ├── G3: (select G5 G6) (scan b,constrained) +// ├── G4: (true) +// ├── G5: (scan b) +// ├── G6: (eq G7 G8) +// ├── G7: (variable b.x) +// └── G8: (const 1) // // setLowestCostTree is called after exploration is complete, and after each // group member has been costed. If the second expression in groups G1 and G3 diff --git a/pkg/sql/opt/xform/optimizer_test.go b/pkg/sql/opt/xform/optimizer_test.go index ba1492e9551c..ec94070f45a6 100644 --- a/pkg/sql/opt/xform/optimizer_test.go +++ b/pkg/sql/opt/xform/optimizer_test.go @@ -134,9 +134,10 @@ func TestDetachMemoRace(t *testing.T) { } // TestCoster files can be run separately like this: -// make test PKG=./pkg/sql/opt/xform TESTS="TestCoster/sort" -// make test PKG=./pkg/sql/opt/xform TESTS="TestCoster/scan" -// ... +// +// make test PKG=./pkg/sql/opt/xform TESTS="TestCoster/sort" +// make test PKG=./pkg/sql/opt/xform TESTS="TestCoster/scan" +// ... func TestCoster(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -148,9 +149,10 @@ func TestCoster(t *testing.T) { } // TestPhysicalProps files can be run separately like this: -// make test PKG=./pkg/sql/opt/xform TESTS="TestPhysicalPropsFactory/ordering" -// make test PKG=./pkg/sql/opt/xform TESTS="TestPhysicalPropsFactory/presentation" -// ... +// +// make test PKG=./pkg/sql/opt/xform TESTS="TestPhysicalPropsFactory/ordering" +// make test PKG=./pkg/sql/opt/xform TESTS="TestPhysicalPropsFactory/presentation" +// ... func TestPhysicalProps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -168,8 +170,9 @@ func TestPhysicalProps(t *testing.T) { } // TestRuleProps files can be run separately like this: -// make test PKG=./pkg/sql/opt/xform TESTS="TestRuleProps/orderings" -// ... +// +// make test PKG=./pkg/sql/opt/xform TESTS="TestRuleProps/orderings" +// ... func TestRuleProps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -182,9 +185,10 @@ func TestRuleProps(t *testing.T) { } // TestRules files can be run separately like this: -// make test PKG=./pkg/sql/opt/xform TESTS="TestRules/scan" -// make test PKG=./pkg/sql/opt/xform TESTS="TestRules/select" -// ... +// +// make test PKG=./pkg/sql/opt/xform TESTS="TestRules/scan" +// make test PKG=./pkg/sql/opt/xform TESTS="TestRules/select" +// ... func TestRules(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -206,12 +210,13 @@ var externalTestData = flag.String( // over time. // // TestExternal files can be run separately like this: -// make test PKG=./pkg/sql/opt/xform TESTS="TestExternal/tpch" -// ... +// +// make test PKG=./pkg/sql/opt/xform TESTS="TestExternal/tpch" +// ... 
// // Test files from another location can be run using the -d flag: -// make test PKG=./pkg/sql/opt/xform TESTS=TestExternal TESTFLAGS='-d /some-dir' // +// make test PKG=./pkg/sql/opt/xform TESTS=TestExternal TESTFLAGS='-d /some-dir' func TestExternal(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -237,10 +242,11 @@ func TestPlaceholderFastPath(t *testing.T) { } // runDataDrivenTest runs data-driven testcases of the form -// -// -// ---- -// +// +// +// +// ---- +// // // See OptTester.Handle for supported commands. func runDataDrivenTest(t *testing.T, path string, fmtFlags memo.ExprFmtFlags) { diff --git a/pkg/sql/opt/xform/scan_funcs.go b/pkg/sql/opt/xform/scan_funcs.go index e6b8608979a6..0e468532bac0 100644 --- a/pkg/sql/opt/xform/scan_funcs.go +++ b/pkg/sql/opt/xform/scan_funcs.go @@ -31,13 +31,14 @@ import ( // in that it behaves the exactly the same as a non-partial secondary index. // // NOTE: This does not generate index joins for non-covering indexes (except in -// case of ForceIndex). Index joins are usually only introduced "one level -// up", when the Scan operator is wrapped by an operator that constrains -// or limits scan output in some way (e.g. Select, Limit, InnerJoin). -// Index joins are only lower cost when their input does not include all -// rows from the table. See GenerateConstrainedScans, -// GenerateLimitedScans, and GenerateLimitedGroupByScans for cases where -// index joins are introduced into the memo. +// +// case of ForceIndex). Index joins are usually only introduced "one level +// up", when the Scan operator is wrapped by an operator that constrains +// or limits scan output in some way (e.g. Select, Limit, InnerJoin). +// Index joins are only lower cost when their input does not include all +// rows from the table. See GenerateConstrainedScans, +// GenerateLimitedScans, and GenerateLimitedGroupByScans for cases where +// index joins are introduced into the memo. func (c *CustomFuncs) GenerateIndexScans(grp memo.RelExpr, scanPrivate *memo.ScanPrivate) { // Iterate over all non-inverted and non-partial secondary indexes. var pkCols opt.ColSet @@ -244,53 +245,54 @@ func (c *CustomFuncs) GenerateLocalityOptimizedScan( // spans may be produced which don't maximize the number of rows accessed as a // 100% local operation. 
// For example: -// CREATE TABLE abc_part ( -// r STRING NOT NULL , -// t INT NOT NULL, -// a INT PRIMARY KEY, -// b INT, -// c INT, -// d INT, -// UNIQUE INDEX c_idx (r, t, c) PARTITION BY LIST (r, t) ( -// PARTITION west VALUES IN (('west', 1), ('east', 4)), -// PARTITION east VALUES IN (('east', DEFAULT), ('east', 2)), -// PARTITION default VALUES IN (DEFAULT) -// ) -// ); -// ALTER PARTITION "east" OF INDEX abc_part@c_idx CONFIGURE ZONE USING -// num_voters = 5, -// voter_constraints = '{+region=east: 2}', -// lease_preferences = '[[+region=east]]' // -// ALTER PARTITION "west" OF INDEX abc_part@c_idx CONFIGURE ZONE USING -// num_voters = 5, -// voter_constraints = '{+region=west: 2}', -// lease_preferences = '[[+region=west]]' +// CREATE TABLE abc_part ( +// r STRING NOT NULL , +// t INT NOT NULL, +// a INT PRIMARY KEY, +// b INT, +// c INT, +// d INT, +// UNIQUE INDEX c_idx (r, t, c) PARTITION BY LIST (r, t) ( +// PARTITION west VALUES IN (('west', 1), ('east', 4)), +// PARTITION east VALUES IN (('east', DEFAULT), ('east', 2)), +// PARTITION default VALUES IN (DEFAULT) +// ) +// ); +// ALTER PARTITION "east" OF INDEX abc_part@c_idx CONFIGURE ZONE USING +// num_voters = 5, +// voter_constraints = '{+region=east: 2}', +// lease_preferences = '[[+region=east]]' +// +// ALTER PARTITION "west" OF INDEX abc_part@c_idx CONFIGURE ZONE USING +// num_voters = 5, +// voter_constraints = '{+region=west: 2}', +// lease_preferences = '[[+region=west]]' // -// ALTER PARTITION "default" OF INDEX abc_part@c_idx CONFIGURE ZONE USING -// num_voters = 5, -// lease_preferences = '[[+region=central]]'; +// ALTER PARTITION "default" OF INDEX abc_part@c_idx CONFIGURE ZONE USING +// num_voters = 5, +// lease_preferences = '[[+region=central]]'; // -// EXPLAIN SELECT c FROM abc_part@c_idx LIMIT 3; -// info -// ---------------------------------------------- -// distribution: local -// vectorized: true +// EXPLAIN SELECT c FROM abc_part@c_idx LIMIT 3; +// info +// ---------------------------------------------- +// distribution: local +// vectorized: true // -// • union all -// │ limit: 3 -// │ -// ├── • scan -// │ missing stats -// │ table: abc_part@c_idx -// │ spans: [/'east'/2 - /'east'/3] -// │ limit: 3 -// │ -// └── • scan -// missing stats -// table: abc_part@c_idx -// spans: [ - /'east'/1] [/'east'/4 - ] -// limit: 3 +// • union all +// │ limit: 3 +// │ +// ├── • scan +// │ missing stats +// │ table: abc_part@c_idx +// │ spans: [/'east'/2 - /'east'/3] +// │ limit: 3 +// │ +// └── • scan +// missing stats +// table: abc_part@c_idx +// spans: [ - /'east'/1] [/'east'/4 - ] +// limit: 3 // // Because of the partial-default east partition, ('east', DEFAULT), the spans // in the local (left) branch of the union all should be diff --git a/pkg/sql/opt/xform/scan_index_iter.go b/pkg/sql/opt/xform/scan_index_iter.go index c9f4bc0d1776..a36e479b22f7 100644 --- a/pkg/sql/opt/xform/scan_index_iter.go +++ b/pkg/sql/opt/xform/scan_index_iter.go @@ -25,8 +25,7 @@ import ( // iteration. 
For example, the iterator would skip over inverted and partial // indexes given these flags: // -// flags := rejectInvertedIndexes|rejectPartialIndexes -// +// flags := rejectInvertedIndexes|rejectPartialIndexes type indexRejectFlags int8 const ( @@ -119,10 +118,10 @@ func (it *scanIndexIter) Init( // // Consider the indexes and query: // -// CREATE INDEX idx1 ON t (a) WHERE c > 0 -// CREATE INDEX idx2 ON t (b) WHERE c > 0 +// CREATE INDEX idx1 ON t (a) WHERE c > 0 +// CREATE INDEX idx2 ON t (b) WHERE c > 0 // -// SELECT * FROM t WHERE a = 1 AND b = 2 AND c > 0 +// SELECT * FROM t WHERE a = 1 AND b = 2 AND c > 0 // // The optimal query plan is a zigzag join over idx1 and idx2. Planning a zigzag // join requires a nested loop over the indexes of a table. The outer loop will @@ -143,10 +142,10 @@ func (it *scanIndexIter) Init( // // Consider the indexes and query: // -// CREATE INDEX idx1 ON t (a) WHERE b > 0 -// CREATE INDEX idx2 ON t (c) WHERE d > 0 +// CREATE INDEX idx1 ON t (a) WHERE b > 0 +// CREATE INDEX idx2 ON t (c) WHERE d > 0 // -// SELECT * FROM t WHERE a = 1 AND b > 0 AND c = 2 AND d > 0 +// SELECT * FROM t WHERE a = 1 AND b > 0 AND c = 2 AND d > 0 // // The optimal query plan is a zigzag join over idx1 and idx2 with no remaining // Select filters. If the original filters were passed to the inner loop's Init, diff --git a/pkg/sql/opt/xform/select_funcs.go b/pkg/sql/opt/xform/select_funcs.go index 89a11f985218..6b948f9c73ae 100644 --- a/pkg/sql/opt/xform/select_funcs.go +++ b/pkg/sql/opt/xform/select_funcs.go @@ -50,18 +50,18 @@ var _ = (*CustomFuncs).IsLocking // generated along with a combination of an IndexJoin and Selects. There are // three questions to consider which determine which operators are generated. // -// 1. Does the index "cover" the columns needed? -// 2. Are there any remaining filters to apply after the Scan? -// 3. If there are remaining filters does the index cover the referenced -// columns? +// 1. Does the index "cover" the columns needed? +// 2. Are there any remaining filters to apply after the Scan? +// 3. If there are remaining filters does the index cover the referenced +// columns? // // If the index covers the columns needed, no IndexJoin is need. The two // possible generated expressions are either a lone Scan or a Scan wrapped in a // Select that applies any remaining filters. // -// (Scan $scanDef) +// (Scan $scanDef) // -// (Select (Scan $scanDef) $remainingFilters) +// (Select (Scan $scanDef) $remainingFilters) // // If the index is not covering, then an IndexJoin is required to retrieve the // needed columns. Some or all of the remaining filters may be required to be @@ -77,26 +77,25 @@ var _ = (*CustomFuncs).IsLocking // the IndexJoin, if their columns are not covered. Therefore, Selects can be // constructed before, after, or both before and after the IndexJoin. 
// -// (IndexJoin (Scan $scanDef) $indexJoinDef) -// -// (IndexJoin -// (Select (Scan $scanDef) $remainingFilters) -// $indexJoinDef -// ) -// -// (Select -// (IndexJoin (Scan $scanDef) $indexJoinDef) -// $outerFilter -// ) -// -// (Select -// (IndexJoin -// (Select (Scan $scanDef) $innerFilter) -// $indexJoinDef -// ) -// $outerFilter -// ) -// +// (IndexJoin (Scan $scanDef) $indexJoinDef) +// +// (IndexJoin +// (Select (Scan $scanDef) $remainingFilters) +// $indexJoinDef +// ) +// +// (Select +// (IndexJoin (Scan $scanDef) $indexJoinDef) +// $outerFilter +// ) +// +// (Select +// (IndexJoin +// (Select (Scan $scanDef) $innerFilter) +// $indexJoinDef +// ) +// $outerFilter +// ) func (c *CustomFuncs) GeneratePartialIndexScans( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr, ) { @@ -156,18 +155,21 @@ func (c *CustomFuncs) GeneratePartialIndexScans( // MakeCombinedFiltersConstraint builds a constraint from explicitFilters, // optionalFilters and conditionally an IN list filter generated from the // index's PARTITION BY LIST values if both of these conditions are true: -// 1) The first partitioning column is not referenced in either -// optionalFilters or explicitFilters -// 2) No index key columns are referenced in optionalFilters or -// explicitFilters. +// 1. The first partitioning column is not referenced in either +// optionalFilters or explicitFilters +// 2. No index key columns are referenced in optionalFilters or +// explicitFilters. +// // These filters are passed in a single call to tryConstrainIndex. // In all known uses, optionalFilters consists of the CHECK constraint filters // and computed column filters. // Returns: -// partitionFilters as the IN list of PARTITION BY values, if it was built -// remainingFilters as any filters which weren't used in combinedConstraint -// combinedConstraint as the collection of Spans to scan -// ok==false if we failed to constrain the scan +// +// partitionFilters as the IN list of PARTITION BY values, if it was built +// remainingFilters as any filters which weren't used in combinedConstraint +// combinedConstraint as the collection of Spans to scan +// ok==false if we failed to constrain the scan +// // See additional comments below. 
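Conditions 1 and 2 above reduce to two column-set checks. A hedged sketch of that gate using plain string sets; colSet and canAddPartitionInList are hypothetical names rather than the optimizer's API, and the column names reuse the abc_part/c_idx example shown earlier in this diff.

    package main

    import "fmt"

    // colSet is a stand-in for an opt.ColSet, keyed by column name for clarity.
    type colSet map[string]bool

    func (s colSet) intersects(other colSet) bool {
        for c := range s {
            if other[c] {
                return true
            }
        }
        return false
    }

    // canAddPartitionInList reports whether an IN-list filter derived from the
    // index's PARTITION BY LIST values may be synthesized: the explicit and
    // optional filters must reference neither the first partitioning column nor
    // any index key column (conditions 1 and 2 above).
    func canAddPartitionInList(filterCols colSet, firstPartitionCol string, indexKeyCols colSet) bool {
        if filterCols[firstPartitionCol] {
            return false
        }
        return !filterCols.intersects(indexKeyCols)
    }

    func main() {
        keyCols := colSet{"r": true, "t": true, "c": true}

        // Filters referencing only column d touch neither the partitioning
        // column r nor the key columns (r, t, c) of c_idx.
        fmt.Println(canAddPartitionInList(colSet{"d": true}, "r", keyCols)) // true

        // A filter on c references an index key column, so no IN list is added.
        fmt.Println(canAddPartitionInList(colSet{"c": true}, "r", keyCols)) // false
    }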
func (c *CustomFuncs) MakeCombinedFiltersConstraint( tabMeta *opt.TableMeta, @@ -355,53 +357,53 @@ func (c *CustomFuncs) GetOptionalFiltersAndFilterColumns( // For each secondary index that "covers" the columns needed by the scan, there // are three cases: // -// - a filter that can be completely converted to a constraint over that index -// generates a single constrained Scan operator (to be added to the same -// group as the original Select operator): +// - a filter that can be completely converted to a constraint over that index +// generates a single constrained Scan operator (to be added to the same +// group as the original Select operator): // -// (Scan $scanDef) +// (Scan $scanDef) // -// - a filter that can be partially converted to a constraint over that index -// generates a constrained Scan operator in a new memo group, wrapped in a -// Select operator having the remaining filter (to be added to the same group -// as the original Select operator): +// - a filter that can be partially converted to a constraint over that index +// generates a constrained Scan operator in a new memo group, wrapped in a +// Select operator having the remaining filter (to be added to the same group +// as the original Select operator): // -// (Select (Scan $scanDef) $filter) +// (Select (Scan $scanDef) $filter) // -// - a filter that cannot be converted to a constraint generates nothing +// - a filter that cannot be converted to a constraint generates nothing // // And for a secondary index that does not cover the needed columns: // -// - a filter that can be completely converted to a constraint over that index -// generates a single constrained Scan operator in a new memo group, wrapped -// in an IndexJoin operator that looks up the remaining needed columns (and -// is added to the same group as the original Select operator) -// -// (IndexJoin (Scan $scanDef) $indexJoinDef) -// -// - a filter that can be partially converted to a constraint over that index -// generates a constrained Scan operator in a new memo group, wrapped in an -// IndexJoin operator that looks up the remaining needed columns; the -// remaining filter is distributed above and/or below the IndexJoin, -// depending on which columns it references: -// -// (IndexJoin -// (Select (Scan $scanDef) $filter) -// $indexJoinDef -// ) -// -// (Select -// (IndexJoin (Scan $scanDef) $indexJoinDef) -// $filter -// ) -// -// (Select -// (IndexJoin -// (Select (Scan $scanDef) $innerFilter) -// $indexJoinDef -// ) -// $outerFilter -// ) +// - a filter that can be completely converted to a constraint over that index +// generates a single constrained Scan operator in a new memo group, wrapped +// in an IndexJoin operator that looks up the remaining needed columns (and +// is added to the same group as the original Select operator) +// +// (IndexJoin (Scan $scanDef) $indexJoinDef) +// +// - a filter that can be partially converted to a constraint over that index +// generates a constrained Scan operator in a new memo group, wrapped in an +// IndexJoin operator that looks up the remaining needed columns; the +// remaining filter is distributed above and/or below the IndexJoin, +// depending on which columns it references: +// +// (IndexJoin +// (Select (Scan $scanDef) $filter) +// $indexJoinDef +// ) +// +// (Select +// (IndexJoin (Scan $scanDef) $indexJoinDef) +// $filter +// ) +// +// (Select +// (IndexJoin +// (Select (Scan $scanDef) $innerFilter) +// $indexJoinDef +// ) +// $outerFilter +// ) // // GenerateConstrainedScans will further constrain 
the enumerated index scans // by trying to use the check constraints and computed columns that apply to the @@ -499,7 +501,6 @@ func (c *CustomFuncs) GenerateConstrainedScans( // ID into a constant value, by evaluating it with respect to a set of other // columns that are constant. If the computed column is constant, enter it into // the constCols map and return true. Otherwise, return false. -// func (c *CustomFuncs) tryFoldComputedCol( tabMeta *opt.TableMeta, computedColID opt.ColumnID, constCols constColsMap, ) bool { @@ -555,18 +556,22 @@ func (c *CustomFuncs) tryFoldComputedCol( // these spans. // // For example, if we have: -// PARTITION BY LIST (a, b) ( -// PARTITION a VALUES IN ((1, 10)), -// PARTITION b VALUES IN ((2, 20)), -// ) +// +// PARTITION BY LIST (a, b) ( +// PARTITION a VALUES IN ((1, 10)), +// PARTITION b VALUES IN ((2, 20)), +// ) +// // The in-between filters are: -// (a, b) < (1, 10) OR -// ((a, b) > (1, 10) AND (a, b) < (2, 20)) OR -// (a, b) > (2, 20) +// +// (a, b) < (1, 10) OR +// ((a, b) > (1, 10) AND (a, b) < (2, 20)) OR +// (a, b) > (2, 20) // // When passed as optional filters to index constrains, these filters generate // the desired spans: -// [ - /1/10), (/1/10 - /2/20), (2/20 - ]. +// +// [ - /1/10), (/1/10 - /2/20), (2/20 - ]. // // TODO(radu,mgartner): technically these filters are not correct with respect // to NULL values - we would want the tuple comparisons to treat NULLs as the @@ -772,19 +777,22 @@ func (c *CustomFuncs) isPrefixOf(pre []tree.Datum, other []tree.Datum) bool { // For example consider the following table and partitioned index: // // CREATE TABLE orders ( -// region STRING NOT NULL, id INT8 NOT NULL, total DECIMAL NOT NULL, seq_num INT NOT NULL, -// PRIMARY KEY (region, id) +// +// region STRING NOT NULL, id INT8 NOT NULL, total DECIMAL NOT NULL, seq_num INT NOT NULL, +// PRIMARY KEY (region, id) +// // ) // // CREATE INDEX orders_by_seq_num -// ON orders (region, seq_num, id) -// STORING (total) -// PARTITION BY LIST (region) -// ( -// PARTITION us_east1 VALUES IN ('us-east1'), -// PARTITION us_west1 VALUES IN ('us-west1'), -// PARTITION europe_west2 VALUES IN ('europe-west2') -// ) +// +// ON orders (region, seq_num, id) +// STORING (total) +// PARTITION BY LIST (region) +// ( +// PARTITION us_east1 VALUES IN ('us-east1'), +// PARTITION us_west1 VALUES IN ('us-west1'), +// PARTITION europe_west2 VALUES IN ('europe-west2') +// ) // // Now consider the following query: // SELECT sum(total) FROM orders WHERE seq_num >= 100 AND seq_num < 200 @@ -795,21 +803,21 @@ func (c *CustomFuncs) isPrefixOf(pre []tree.Datum, other []tree.Datum) bool { // filters to catch all the values that are not part of the partitions). 
// By doing so, we get the following plan: // scalar-group-by -// ├── select -// │ ├── scan orders@orders_by_seq_num -// │ │ └── constraint: /1/4/2: [ - /'europe-west2') -// │ │ [/'europe-west2'/100 - /'europe-west2'/199] -// │ │ [/e'europe-west2\x00'/100 - /'us-east1') -// │ │ [/'us-east1'/100 - /'us-east1'/199] -// │ │ [/e'us-east1\x00'/100 - /'us-west1') -// │ │ [/'us-west1'/100 - /'us-west1'/199] -// │ │ [/e'us-west1\x00'/100 - ] -// │ └── filters -// │ └── (seq_num >= 100) AND (seq_num < 200) -// └── aggregations -// └── sum -// └── variable: total // +// ├── select +// │ ├── scan orders@orders_by_seq_num +// │ │ └── constraint: /1/4/2: [ - /'europe-west2') +// │ │ [/'europe-west2'/100 - /'europe-west2'/199] +// │ │ [/e'europe-west2\x00'/100 - /'us-east1') +// │ │ [/'us-east1'/100 - /'us-east1'/199] +// │ │ [/e'us-east1\x00'/100 - /'us-west1') +// │ │ [/'us-west1'/100 - /'us-west1'/199] +// │ │ [/e'us-west1\x00'/100 - ] +// │ └── filters +// │ └── (seq_num >= 100) AND (seq_num < 200) +// └── aggregations +// └── sum +// └── variable: total func (c *CustomFuncs) partitionValuesFilters( tabID opt.TableID, index cat.Index, ) (partitionFilter, inBetweenFilter memo.FiltersExpr) { @@ -978,10 +986,9 @@ func (c *CustomFuncs) tryConstrainIndex( // If any of the three following statements are true, then it is // possible that the index can be constrained: // -// 1. The filter references the first index column. -// 2. The constraints are not tight (see props.Scalar.TightConstraints). -// 3. Any of the filter's constraints start with the first index column. -// +// 1. The filter references the first index column. +// 2. The constraints are not tight (see props.Scalar.TightConstraints). +// 3. Any of the filter's constraints start with the first index column. func (c *CustomFuncs) canMaybeConstrainNonInvertedIndex( filters memo.FiltersExpr, tabID opt.TableID, indexOrd int, ) bool { @@ -1654,14 +1661,14 @@ func (c *CustomFuncs) SplitDisjunction( // // An "interesting" pair of expressions is one where: // -// 1. The column sets of both expressions in the pair are not -// equal. -// 2. Two index scans can potentially be constrained by both expressions in -// the pair. +// 1. The column sets of both expressions in the pair are not +// equal. +// 2. Two index scans can potentially be constrained by both expressions in +// the pair. // // Consider the expression: // -// u = 1 OR v = 2 +// u = 1 OR v = 2 // // If an index exists on u and another on v, an "interesting" pair exists, ("u = // 1", "v = 1"). If both indexes do not exist, there is no "interesting" pair @@ -1669,7 +1676,7 @@ func (c *CustomFuncs) SplitDisjunction( // // Now consider the expression: // -// u = 1 OR u = 2 +// u = 1 OR u = 2 // // There is no possible "interesting" pair here because the left and right sides // of the disjunction share the same columns. @@ -1754,14 +1761,14 @@ func (c *CustomFuncs) findInterestingDisjunctionPair( // negatives. As an example of a false negative, consider the following table // and query. 
// -// CREATE TABLE t ( -// k PRIMARY KEY, -// a INT, -// hash INT AS (a % 4) STORED, -// INDEX hash (hash) -// ) +// CREATE TABLE t ( +// k PRIMARY KEY, +// a INT, +// hash INT AS (a % 4) STORED, +// INDEX hash (hash) +// ) // -// SELECT * FROM t WHERE a = 5 +// SELECT * FROM t WHERE a = 5 // // The expression "a = 5" can constrain a scan over the hash index: The columns // "hash" must be a constant value of 1 because it is dependent on column "a" diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index c678a9bb778f..70fcc249b7b3 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -2211,10 +2211,11 @@ func makeColList(table cat.Table, cols exec.TableColumnOrdinalSet) []catalog.Col // makePublicToReturnColumnIndexMapping returns a map from the ordinals // of the table's public columns to ordinals in the returnColDescs slice. -// More precisely, for 0 <= i < len(tableDesc.PublicColumns()): -// result[i] = j such that returnColDescs[j].ID is the ID of -// the i'th public column, or -// -1 if the i'th public column is not found in returnColDescs. +// +// More precisely, for 0 <= i < len(tableDesc.PublicColumns()): +// result[i] = j such that returnColDescs[j].ID is the ID of +// the i'th public column, or +// -1 if the i'th public column is not found in returnColDescs. func makePublicToReturnColumnIndexMapping( tableDesc catalog.TableDescriptor, returnCols []catalog.Column, ) []int { diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index c7805c4bec52..6c8681c9ae66 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -4391,16 +4391,15 @@ func init() { // are 32 bits and that they are stable across accesses. // // The type has a few layers of methods: -// - write methods write concrete types to the underlying running hash. -// - write methods account for single database objects like TableDescriptors -// or IndexDescriptors in the running hash. These methods aim to write information -// that would uniquely fingerprint the object to the hash using the first layer of -// methods. -// - Oid methods use the second layer of methods to construct a unique -// object identifier for the provided database object. This object identifier will -// be returned as a *tree.DInt, and the running hash will be reset. These are the -// only methods that are part of the oidHasher's external facing interface. -// +// - write methods write concrete types to the underlying running hash. +// - write methods account for single database objects like TableDescriptors +// or IndexDescriptors in the running hash. These methods aim to write information +// that would uniquely fingerprint the object to the hash using the first layer of +// methods. +// - Oid methods use the second layer of methods to construct a unique +// object identifier for the provided database object. This object identifier will +// be returned as a *tree.DInt, and the running hash will be reset. These are the +// only methods that are part of the oidHasher's external facing interface. 
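Editorial aside: the oidHasher comment above describes a three-layer pattern (raw write methods, per-object fingerprint methods, and Oid methods that finalize and reset the running hash). Below is a minimal standalone sketch of that layering, assuming illustrative names (toyOidHasher, writeStr, writeTable, tableOid) that are not CockroachDB's actual API; it only shows the shape of the pattern, not the real oidHasher.

package main

import (
	"fmt"
	"hash"
	"hash/fnv"
)

// toyOidHasher mirrors the layering described above: low-level write
// methods feed a running 32-bit hash, object-level methods decide what
// uniquely fingerprints an object, and the Oid-style method finalizes
// the hash and resets the state.
type toyOidHasher struct {
	h hash.Hash32
}

func newToyOidHasher() toyOidHasher { return toyOidHasher{h: fnv.New32a()} }

// Layer 1: write a concrete value into the running hash.
func (o toyOidHasher) writeStr(s string) { _, _ = o.h.Write([]byte(s)) }

// Layer 2: fingerprint one database object (here, just by its names).
func (o toyOidHasher) writeTable(dbName, tableName string) {
	o.writeStr(dbName)
	o.writeStr(tableName)
}

// Layer 3: produce the object identifier and reset the running hash so
// the hasher can be reused for the next object.
func (o toyOidHasher) tableOid(dbName, tableName string) uint32 {
	o.writeTable(dbName, tableName)
	oid := o.h.Sum32()
	o.h.Reset()
	return oid
}

func main() {
	h := newToyOidHasher()
	fmt.Println(h.tableOid("defaultdb", "users"))
	fmt.Println(h.tableOid("defaultdb", "orders"))
}

Resetting inside tableOid is what lets one hasher be reused across objects, which is the property the original comment calls out ("the running hash will be reset").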
type oidHasher struct { h hash.Hash32 } diff --git a/pkg/sql/pg_metadata_diff.go b/pkg/sql/pg_metadata_diff.go index f28bc5dd252c..ebc0f6af79f2 100644 --- a/pkg/sql/pg_metadata_diff.go +++ b/pkg/sql/pg_metadata_diff.go @@ -68,12 +68,12 @@ type PGMetadataColumnDiff struct { type PGMetadataColumnDiffs map[string]*PGMetadataColumnDiff // PGMetadataTableDiffs is used to store and load expected diffs: -// - A table name pointing to a zero length PGMetadataColumnDiffs means that we expect this table to be missing -// in cockroach db. -// - If PGMetadataColumnDiffs is not empty but columnName points to null, we expect that column to be missing in that table in -// cockroach db. -// - If column Name points to a not null PGMetadataColumnDiff, the test column describes how we expect that data type to be -// different between cockroach db and postgres. +// - A table name pointing to a zero length PGMetadataColumnDiffs means that we expect this table to be missing +// in cockroach db. +// - If PGMetadataColumnDiffs is not empty but columnName points to null, we expect that column to be missing in that table in +// cockroach db. +// - If column Name points to a not null PGMetadataColumnDiff, the test column describes how we expect that data type to be +// different between cockroach db and postgres. type PGMetadataTableDiffs map[string]PGMetadataColumnDiffs // PGMetadataColumnType represents a column type from postgres/mysql. @@ -286,7 +286,7 @@ func Save(writer io.Writer, file interface{}) { } } -//getUnimplementedTables retrieves the tables that are not yet part of CRDB. +// getUnimplementedTables retrieves the tables that are not yet part of CRDB. func (d PGMetadataTableDiffs) getUnimplementedTables(source PGMetadataTables) PGMetadataTables { unimplementedTables := make(PGMetadataTables) for tableName := range d { diff --git a/pkg/sql/pg_metadata_test.go b/pkg/sql/pg_metadata_test.go index 38ba61504cf5..a2b7904d2b39 100644 --- a/pkg/sql/pg_metadata_test.go +++ b/pkg/sql/pg_metadata_test.go @@ -13,7 +13,9 @@ // db skipping all the known diffs. To Run: // // SCENARIO 1. Using defaults, will test using defaults: -// rdbms=postgres, catalog=pg_catalog, no rewrite diffs, no adding missing tables. +// +// rdbms=postgres, catalog=pg_catalog, no rewrite diffs, no adding missing tables. +// // cd pkg/sql // go test -run TestDiffTool // @@ -21,21 +23,24 @@ // add -rewrite-diffs flag when running this test: // // SCENARIO 2: Updating known diffs, will use same defaults as SCENARIO 1, -// Except that it will rewrite known diffs +// +// Except that it will rewrite known diffs // // cd pkg/sql // go test -run TestDiffTool --rewrite-diffs // // SCENARIO 3: Need to add missing tables/columns, this also have same defaults as -// SCENARIO 1, except for adding missing tables/columns. -// NOTE: This options only works for pg_catalog and information_schema from postgres -// information_schema can't add missing columns, only missing tables. +// +// SCENARIO 1, except for adding missing tables/columns. +// NOTE: This options only works for pg_catalog and information_schema from postgres +// information_schema can't add missing columns, only missing tables. // // cd pkg/sql // go test -run TestDiffTool --add-missing-tables // // SCENARIO 4: Want to check differences on information_schema from postgres. 
-// NOTE: This can be combined with --add-missing-tables or --rewrite-diffs +// +// NOTE: This can be combined with --add-missing-tables or --rewrite-diffs // // cd pkg/sql // go test -run TestDiffTool --catalog information_schema @@ -46,8 +51,9 @@ // go test -run TestInformationSchemaPostgres // // SCENARIO 5: Want to check differences on information_schema from mysql. -// NOTE: --add-missing-tables is not allowed when using rdbms != postgres. -// --rewrite-diffs is allowed. +// +// NOTE: --add-missing-tables is not allowed when using rdbms != postgres. +// --rewrite-diffs is allowed. // // cd pkg/sql // go test -run TestDiffTool --catalog information_schema --rdbms mysql @@ -58,41 +64,45 @@ // got test -run TestInformationSchemaMySQL // // To create/update dump files from postgres/mysql see: -// pkg/cmd/generate-metadata-tables/main.go +// +// pkg/cmd/generate-metadata-tables/main.go // // Most common use case is Updating/Adding missing columns: // -// 1. Run pkg/cmd/generate-metadata-tables/main.go with flags to connect -// postgres. -// 2. Run SCENARIO 3 to add missing tables/columns. -// 3. Run SCENARIO 2 to update expected diffs. -// 4. Validate SCENARIO 1 passes. -// 5. Rewrite logic tests, the most probable logic tests that might fail -// after adding missing tables are: -// - pg_catalog -// - information_schema -// - create_statements -// - create_statements -// - grant_table -// - table +// 1. Run pkg/cmd/generate-metadata-tables/main.go with flags to connect +// postgres. +// +// 2. Run SCENARIO 3 to add missing tables/columns. +// +// 3. Run SCENARIO 2 to update expected diffs. // -// NOTE: Even if you updated pg_catalog, It is recommended that you rewrite -// information_schema logic tests. +// 4. Validate SCENARIO 1 passes. +// +// 5. Rewrite logic tests, the most probable logic tests that might fail +// after adding missing tables are: +// - pg_catalog +// - information_schema +// - create_statements +// - create_statements +// - grant_table +// - table +// +// NOTE: Even if you updated pg_catalog, It is recommended that you rewrite +// information_schema logic tests. // // How to debug using Goland: -// 1. Go to Run/Debug configurations -// 2. Select "Go Test" -// 3. Click Add configuration or edit an existing configuration -// 4. Give it a Name, set directory to pkg/sql and set the program arguments with the -// scenario that you want to test: example of program arguments: -// -run TestDiffTool --add-missing-tables -// NOTE: In the program arguments you can use another flag called test-data-filename -// If you want to use a different JSON source (Like a testing JSON just for -// debugging purposes). +// 1. Go to Run/Debug configurations +// 2. Select "Go Test" +// 3. Click Add configuration or edit an existing configuration +// 4. Give it a Name, set directory to pkg/sql and set the program arguments with the +// scenario that you want to test: example of program arguments: +// -run TestDiffTool --add-missing-tables +// NOTE: In the program arguments you can use another flag called test-data-filename +// If you want to use a different JSON source (Like a testing JSON just for +// debugging purposes). // // Where to start when debugging? 
// -> func TestDiffTool -// package sql import ( diff --git a/pkg/sql/pgwire/auth_test.go b/pkg/sql/pgwire/auth_test.go index 5061989cabc8..4d6e30e7dfd7 100644 --- a/pkg/sql/pgwire/auth_test.go +++ b/pkg/sql/pgwire/auth_test.go @@ -57,72 +57,78 @@ import ( // It supports the following DSL: // // config [secure] [insecure] -// Only run the test file if the server is in the specified -// security mode. (The default is `config secure insecure` i.e. -// the test file is applicable to both.) +// +// Only run the test file if the server is in the specified +// security mode. (The default is `config secure insecure` i.e. +// the test file is applicable to both.) // // accept_sql_without_tls -// Enable TCP connections without TLS in secure mode. +// +// Enable TCP connections without TLS in secure mode. // // set_hba // -// Load the provided HBA configuration via the cluster setting -// server.host_based_authentication.configuration. -// The expected output is the configuration after parsing -// and reloading in the server. +// +// Load the provided HBA configuration via the cluster setting +// server.host_based_authentication.configuration. +// The expected output is the configuration after parsing +// and reloading in the server. // // set_identity_map // -// Load the provided identity map via the cluster setting -// server.identity_map.configuration. -// The expected output is the configuration after parsing -// and reloading in the server. +// +// Load the provided identity map via the cluster setting +// server.identity_map.configuration. +// The expected output is the configuration after parsing +// and reloading in the server. // // sql // -// Execute the specified SQL statement using the default root -// connection provided by StartServer(). +// +// Execute the specified SQL statement using the default root +// connection provided by StartServer(). // // authlog N // -// Expect at the end of the auth log then report the -// N entries before that. +// +// Expect at the end of the auth log then report the +// N entries before that. // // connect [key=value ...] -// Attempt a SQL connection using the provided connection -// parameters using the pg "DSN notation": k/v pairs separated -// by spaces. -// The following standard pg keys are recognized: -// user - the username -// password - the password -// host - the server name/address -// port - the server port -// force_certs - force the use of baked-in certificates -// sslmode, sslrootcert, sslcert, sslkey - SSL parameters. // -// The order of k/v pairs matters: if the same key is specified -// multiple times, the first occurrence takes priority. +// Attempt a SQL connection using the provided connection +// parameters using the pg "DSN notation": k/v pairs separated +// by spaces. +// The following standard pg keys are recognized: +// user - the username +// password - the password +// host - the server name/address +// port - the server port +// force_certs - force the use of baked-in certificates +// sslmode, sslrootcert, sslcert, sslkey - SSL parameters. // -// Additionally, the test runner will always _append_ a default -// value for user (root), host/port/sslrootcert from the -// initialized test server. This default configuration is placed -// at the end so that each test can override the values. +// The order of k/v pairs matters: if the same key is specified +// multiple times, the first occurrence takes priority. 
// -// The test runner also adds a default value for sslcert and -// sslkey based on the value of "user" — either when provided by -// the test, or root by default. +// Additionally, the test runner will always _append_ a default +// value for user (root), host/port/sslrootcert from the +// initialized test server. This default configuration is placed +// at the end so that each test can override the values. // -// When the user is either "root" or "testuser" (those are the -// users for which the test server generates certificates), -// sslmode also gets a default of "verify-full". For other -// users, sslmode is initialized by default to "verify-ca". +// The test runner also adds a default value for sslcert and +// sslkey based on the value of "user" — either when provided by +// the test, or root by default. +// +// When the user is either "root" or "testuser" (those are the +// users for which the test server generates certificates), +// sslmode also gets a default of "verify-full". For other +// users, sslmode is initialized by default to "verify-ca". // // For the directives "sql" and "connect", the expected output can be // either "ok" (no error) or "ERROR:" followed by the expected error // string. // The auth and connection log entries, if any, are also produced // alongside the "ok" or "ERROR" message. -// func TestAuthenticationAndHBARules(t *testing.T) { defer leaktest.AfterTest(t)() skip.UnderRace(t, "takes >1min under race") diff --git a/pkg/sql/pgwire/conn_test.go b/pkg/sql/pgwire/conn_test.go index 66b73b4e51c7..b88ddf1712dc 100644 --- a/pkg/sql/pgwire/conn_test.go +++ b/pkg/sql/pgwire/conn_test.go @@ -17,7 +17,6 @@ import ( "database/sql/driver" "fmt" "io" - "io/ioutil" "net" "net/url" "strconv" @@ -1045,7 +1044,7 @@ func TestMaliciousInputs(t *testing.T) { // The reason this works is that ioutil.devNull implements ReadFrom // as an infinite loop, so it will Read continuously until it hits an // error (on w.Close()). - _, _ = io.Copy(ioutil.Discard, w) + _, _ = io.Copy(io.Discard, w) }() errChan := make(chan error, 1) diff --git a/pkg/sql/pgwire/encoding_test.go b/pkg/sql/pgwire/encoding_test.go index 9fd183338bea..058bf3f6e40b 100644 --- a/pkg/sql/pgwire/encoding_test.go +++ b/pkg/sql/pgwire/encoding_test.go @@ -125,8 +125,9 @@ func readEncodingTests(t testing.TB) []*encodingTest { // TestEncodings uses testdata/encodings.json to test expected pgwire encodings // and ensure they are identical to what Postgres produces. Regenerate that // file by: -// Starting a postgres server on :5432 then running: -// cd pkg/cmd/generate-binary; go run main.go > ../../sql/pgwire/testdata/encodings.json +// +// Starting a postgres server on :5432 then running: +// cd pkg/cmd/generate-binary; go run main.go > ../../sql/pgwire/testdata/encodings.json func TestEncodings(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/pgwire/fuzz.go b/pkg/sql/pgwire/fuzz.go index 6cf19c9fe3cd..19e60f9ab143 100644 --- a/pkg/sql/pgwire/fuzz.go +++ b/pkg/sql/pgwire/fuzz.go @@ -16,7 +16,6 @@ package pgwire import ( "context" "io" - "io/ioutil" "net" "time" @@ -49,7 +48,7 @@ func FuzzServeConn(data []byte) int { }() go func() { // Discard all data sent from the server. 
- _, _ = io.Copy(ioutil.Discard, client) + _, _ = io.Copy(io.Discard, client) }() err := s.ServeConn(context.Background(), srv) if err != nil { diff --git a/pkg/sql/pgwire/hba/hba.go b/pkg/sql/pgwire/hba/hba.go index 9e80bc68af2b..19992607ddf6 100644 --- a/pkg/sql/pgwire/hba/hba.go +++ b/pkg/sql/pgwire/hba/hba.go @@ -320,13 +320,12 @@ func (s String) IsKeyword(v string) bool { // ParseAndNormalize parses the HBA configuration from the provided // string and performs two tasks: // -// - it unicode-normalizes the usernames. Since usernames are -// initialized during pgwire session initialization, this -// ensures that string comparisons can be used to match usernames. -// -// - it ensures there is one entry per username. This simplifies -// the code in the authentication logic. +// - it unicode-normalizes the usernames. Since usernames are +// initialized during pgwire session initialization, this +// ensures that string comparisons can be used to match usernames. // +// - it ensures there is one entry per username. This simplifies +// the code in the authentication logic. func ParseAndNormalize(val string) (*Conf, error) { conf, err := Parse(val) if err != nil { diff --git a/pkg/sql/pgwire/hba/scanner.go b/pkg/sql/pgwire/hba/scanner.go index f1f292fb36ec..8dcba1b4b3fe 100644 --- a/pkg/sql/pgwire/hba/scanner.go +++ b/pkg/sql/pgwire/hba/scanner.go @@ -50,10 +50,11 @@ type lex struct { // rules describes the scanning rules. // // As per pg's source, file src/backend/libpq/hba.c: -// Tokens are strings of non-blank -// characters bounded by blank characters, commas, beginning of line, and -// end of line. Blank means space or tab. Tokens can be delimited by -// double quotes (this allows the inclusion of blanks, but not newlines). +// +// Tokens are strings of non-blank +// characters bounded by blank characters, commas, beginning of line, and +// end of line. Blank means space or tab. Tokens can be delimited by +// double quotes (this allows the inclusion of blanks, but not newlines). // // The scanner implemented here is slightly more strict than the one // used by PostgreSQL. For example, PostgreSQL supports tokens written @@ -74,9 +75,9 @@ type lex struct { // // Meanwhile, the scanner does implements some other oddities of // PostgreSQL. For example: -// a, b (space after comma) counts as a single comma-delimited field. -// a ,b (space before comma) counts as two fields. // +// a, b (space after comma) counts as a single comma-delimited field. +// a ,b (space before comma) counts as two fields. var rules = []struct { r rule rg *regexp.Regexp diff --git a/pkg/sql/pgwire/identmap/ident_map.go b/pkg/sql/pgwire/identmap/ident_map.go index 28110fdc45fd..94974c08b1de 100644 --- a/pkg/sql/pgwire/identmap/ident_map.go +++ b/pkg/sql/pgwire/identmap/ident_map.go @@ -34,10 +34,10 @@ import ( // The Conf supports being initialized from a file format that // is compatible with Postgres's pg_ident.conf file: // -// # Comments -// map-name system-identity database-username -// # Convert "carl@example.com" ==> "example-carl" -// map-name /^(.*)@example.com$ example-\1 +// # Comments +// map-name system-identity database-username +// # Convert "carl@example.com" ==> "example-carl" +// map-name /^(.*)@example.com$ example-\1 // // If the system-identity field starts with a slash, it will be // interpreted as a regular expression. 
The system-identity expression diff --git a/pkg/sql/pgwire/pgerror/flatten.go b/pkg/sql/pgwire/pgerror/flatten.go index 2e02ed262500..f06ead260c89 100644 --- a/pkg/sql/pgwire/pgerror/flatten.go +++ b/pkg/sql/pgwire/pgerror/flatten.go @@ -25,9 +25,9 @@ import ( // the name implies, the details from the chain of causes is projected // into a single struct. This is useful in at least two places: // -// - to generate Error objects suitable for 19.1 nodes, which -// only recognize this type of payload. -// - to generate an error packet on pgwire. +// - to generate Error objects suitable for 19.1 nodes, which +// only recognize this type of payload. +// - to generate an error packet on pgwire. // // Additionally, this can be used in the remainder of the code // base when an Error object is expected, until that code diff --git a/pkg/sql/pgwire/pgerror/pgcode.go b/pkg/sql/pgwire/pgerror/pgcode.go index 00032c04f68a..01e333e923c3 100644 --- a/pkg/sql/pgwire/pgerror/pgcode.go +++ b/pkg/sql/pgwire/pgerror/pgcode.go @@ -42,20 +42,24 @@ func HasCandidateCode(err error) bool { // - at each level: // // - if there is a candidate code at that level, that is used; +// // - otherwise, it calls computeDefaultCode(). // if the function returns an empty string, // UncategorizedError is used. // An example implementation for computeDefaultCode is provided below. // -// - after that, it combines the code computed already for the cause -// (inner) and the new code just computed at the current level (outer) -// as follows: +// - after that, it combines the code computed already for the cause +// (inner) and the new code just computed at the current level (outer) +// as follows: // // - if the outer code is uncategorized, the inner code is kept no // matter what. +// // - if the outer code has the special XX prefix, that is kept. // (The "XX" prefix signals importance in the pg code hierarchy.) +// // - if the inner code is not uncategorized, it is retained. +// // - otherwise the outer code is retained. // // This function should not be used directly. It is only exported diff --git a/pkg/sql/pgwire/pgwirebase/encoding.go b/pkg/sql/pgwire/pgwirebase/encoding.go index 44e582dbb460..63a60af00074 100644 --- a/pkg/sql/pgwire/pgwirebase/encoding.go +++ b/pkg/sql/pgwire/pgwirebase/encoding.go @@ -846,6 +846,7 @@ func validateStringBytes(b []byte) error { } // PGNumericSign indicates the sign of a numeric. +// //go:generate stringer -type=PGNumericSign type PGNumericSign uint16 diff --git a/pkg/sql/pgwire/pgwirebase/msg.go b/pkg/sql/pgwire/pgwirebase/msg.go index b4ee25dab53b..29ec9d5c6e51 100644 --- a/pkg/sql/pgwire/pgwirebase/msg.go +++ b/pkg/sql/pgwire/pgwirebase/msg.go @@ -12,11 +12,13 @@ package pgwirebase import "math" -//ClientMessageType represents a client pgwire message. +// ClientMessageType represents a client pgwire message. +// //go:generate stringer -type=ClientMessageType type ClientMessageType byte -//ServerMessageType represents a server pgwire message. +// ServerMessageType represents a server pgwire message. +// //go:generate stringer -type=ServerMessageType type ServerMessageType byte @@ -56,6 +58,7 @@ const ( ) // ServerErrFieldType represents the error fields. +// //go:generate stringer -type=ServerErrFieldType type ServerErrFieldType byte @@ -74,6 +77,7 @@ const ( ) // PrepareType represents a subtype for prepare messages. 
+// //go:generate stringer -type=PrepareType type PrepareType byte diff --git a/pkg/sql/pgwire/testdata/pgtest/notice b/pkg/sql/pgwire/testdata/pgtest/notice index 8a78ae6b0a25..1193cf6e078e 100644 --- a/pkg/sql/pgwire/testdata/pgtest/notice +++ b/pkg/sql/pgwire/testdata/pgtest/notice @@ -55,7 +55,7 @@ Query {"String": "DROP INDEX t_x_idx"} until crdb_only CommandComplete ---- -{"Severity":"NOTICE","SeverityUnlocalized":"NOTICE","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":546,"Routine":"dropIndexByName","UnknownFields":null} +{"Severity":"NOTICE","SeverityUnlocalized":"NOTICE","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":547,"Routine":"dropIndexByName","UnknownFields":null} {"Type":"CommandComplete","CommandTag":"DROP INDEX"} until noncrdb_only diff --git a/pkg/sql/physicalplan/physical_plan.go b/pkg/sql/physicalplan/physical_plan.go index 8c278d14fa51..25ea559f30d2 100644 --- a/pkg/sql/physicalplan/physical_plan.go +++ b/pkg/sql/physicalplan/physical_plan.go @@ -644,15 +644,17 @@ func (p *PhysicalPlan) AddRendering( // columns (i.e. after post-processing). // // Inputs: -// indexVarMap is a mapping from columns that appear in an expression -// (planNode columns) to columns in the output stream of a -// processor. -// outputColumns is the list of output columns in the processor's -// PostProcessSpec; it is effectively a mapping from the output -// schema to the internal schema of a processor. +// +// indexVarMap is a mapping from columns that appear in an expression +// (planNode columns) to columns in the output stream of a +// processor. +// outputColumns is the list of output columns in the processor's +// PostProcessSpec; it is effectively a mapping from the output +// schema to the internal schema of a processor. // // Result: a "composite map" that maps the planNode columns to the internal -// columns of the processor. +// +// columns of the processor. // // For efficiency, the indexVarMap and the resulting map are represented as // slices, with missing elements having values -1. @@ -660,27 +662,27 @@ func (p *PhysicalPlan) AddRendering( // Used when adding expressions (filtering, rendering) to a processor's // PostProcessSpec. For example: // -// TableReader // table columns A,B,C,D -// Internal schema (before post-processing): A, B, C, D -// OutputColumns: [1 3] -// Output schema (after post-processing): B, D +// TableReader // table columns A,B,C,D +// Internal schema (before post-processing): A, B, C, D +// OutputColumns: [1 3] +// Output schema (after post-processing): B, D // -// Expression "B < D" might be represented as: -// IndexedVar(4) < IndexedVar(1) -// with associated indexVarMap: -// [-1 1 -1 -1 0] // 1->1, 4->0 -// This is effectively equivalent to "IndexedVar(0) < IndexedVar(1)"; 0 means -// the first output column (B), 1 means the second output column (D). 
+// Expression "B < D" might be represented as: +// IndexedVar(4) < IndexedVar(1) +// with associated indexVarMap: +// [-1 1 -1 -1 0] // 1->1, 4->0 +// This is effectively equivalent to "IndexedVar(0) < IndexedVar(1)"; 0 means +// the first output column (B), 1 means the second output column (D). // -// To get an index var map that refers to the internal schema: -// reverseProjection( -// [1 3], // OutputColumns -// [-1 1 -1 -1 0], -// ) = -// [-1 3 -1 -1 1] // 1->3, 4->1 -// This is effectively equivalent to "IndexedVar(1) < IndexedVar(3)"; 1 -// means the second internal column (B), 3 means the fourth internal column -// (D). +// To get an index var map that refers to the internal schema: +// reverseProjection( +// [1 3], // OutputColumns +// [-1 1 -1 -1 0], +// ) = +// [-1 3 -1 -1 1] // 1->3, 4->1 +// This is effectively equivalent to "IndexedVar(1) < IndexedVar(3)"; 1 +// means the second internal column (B), 3 means the fourth internal column +// (D). func reverseProjection(outputColumns []uint32, indexVarMap []int) []int { if indexVarMap == nil { panic("no indexVarMap") diff --git a/pkg/sql/physicalplan/span_resolver.go b/pkg/sql/physicalplan/span_resolver.go index 9c59f0f5b5b1..e876940701e6 100644 --- a/pkg/sql/physicalplan/span_resolver.go +++ b/pkg/sql/physicalplan/span_resolver.go @@ -30,36 +30,36 @@ import ( // Sample usage for resolving a bunch of spans: // // func resolveSpans( -// ctx context.Context, -// it *execinfra.SpanResolverIterator, -// spans ...spanWithDir, -// ) ([][]kv.ReplicaInfo, error) { -// lr := execinfra.NewSpanResolver( -// distSender, nodeDescs, nodeDescriptor, -// execinfra.BinPackingLeaseHolderChoice) -// it := lr.NewSpanResolverIterator(nil) -// res := make([][]kv.ReplicaInfo, 0) -// for _, span := range spans { -// repls := make([]kv.ReplicaInfo, 0) -// for it.Seek(ctx, span.Span, span.dir); ; it.Next(ctx) { -// if !it.Valid() { -// return nil, it.Error() -// } -// repl, err := it.ReplicaInfo(ctx) -// if err != nil { -// return nil, err -// } -// repls = append(repls, repl) -// if !it.NeedAnother() { -// break -// } -// } -// res = append(res, repls) -// } -// return res, nil -// } // +// ctx context.Context, +// it *execinfra.SpanResolverIterator, +// spans ...spanWithDir, // +// ) ([][]kv.ReplicaInfo, error) { +// lr := execinfra.NewSpanResolver( +// distSender, nodeDescs, nodeDescriptor, +// execinfra.BinPackingLeaseHolderChoice) +// it := lr.NewSpanResolverIterator(nil) +// res := make([][]kv.ReplicaInfo, 0) +// for _, span := range spans { +// repls := make([]kv.ReplicaInfo, 0) +// for it.Seek(ctx, span.Span, span.dir); ; it.Next(ctx) { +// if !it.Valid() { +// return nil, it.Error() +// } +// repl, err := it.ReplicaInfo(ctx) +// if err != nil { +// return nil, err +// } +// repls = append(repls, repl) +// if !it.NeedAnother() { +// break +// } +// } +// res = append(res, repls) +// } +// return res, nil +// } type SpanResolver interface { // NewSpanResolverIterator creates a new SpanResolverIterator. // Txn is used for testing and for determining if follower reads are possible. 
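The reverseProjection comment above composes two mappings, and its worked example (OutputColumns [1 3] with indexVarMap [-1 1 -1 -1 0] giving [-1 3 -1 -1 1]) is easy to check in isolation. Below is a self-contained toy version of that composition, using the hypothetical name composeIndexVarMap; it is a sketch of the documented behavior, not the physicalplan implementation.

package main

import "fmt"

// composeIndexVarMap maps expression variables that refer to a
// processor's output schema back onto its internal schema, as described
// in the comment above: missing entries stay -1, everything else is
// routed through outputColumns.
func composeIndexVarMap(outputColumns []uint32, indexVarMap []int) []int {
	res := make([]int, len(indexVarMap))
	for i, v := range indexVarMap {
		if v == -1 {
			res[i] = -1
			continue
		}
		res[i] = int(outputColumns[v])
	}
	return res
}

func main() {
	// From the example: internal schema A,B,C,D; output schema B,D.
	outputColumns := []uint32{1, 3}
	// "B < D" as IndexedVar(4) < IndexedVar(1), expressed against the
	// output schema.
	indexVarMap := []int{-1, 1, -1, -1, 0}
	fmt.Println(composeIndexVarMap(outputColumns, indexVarMap)) // [-1 3 -1 -1 1]
}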
diff --git a/pkg/sql/plan.go b/pkg/sql/plan.go index 3d9e3219473a..2a1eb7da55af 100644 --- a/pkg/sql/plan.go +++ b/pkg/sql/plan.go @@ -69,7 +69,6 @@ func (r *runParams) Ann() *tree.Annotations { // - planNodeNames (walk.go) // - setLimitHint() (limit_hint.go) // - planColumns() (plan_columns.go) -// type planNode interface { startExec(params runParams) error diff --git a/pkg/sql/plan_batch.go b/pkg/sql/plan_batch.go index 707ba88b8d69..c153a294db86 100644 --- a/pkg/sql/plan_batch.go +++ b/pkg/sql/plan_batch.go @@ -20,13 +20,13 @@ import ( // indicate that the local execution behavior operates in batches. // The word "complement" here contrasts with "specializes" as follows: // -// - batchedPlanNode specializes planNode for the purpose of logical -// planning: a node implementing batchedPlanNode behaves in all -// respects like a planNode from the perspective of the various -// logical planning transforms. +// - batchedPlanNode specializes planNode for the purpose of logical +// planning: a node implementing batchedPlanNode behaves in all +// respects like a planNode from the perspective of the various +// logical planning transforms. // -// - batchedPlanNode *replaces* planNode for the purpose of local -// execution. +// - batchedPlanNode *replaces* planNode for the purpose of local +// execution. type batchedPlanNode interface { // batchedPlanNode specializes planNode for the purpose of the recursions // on planNode trees performed during logical planning, so it should "inherit" diff --git a/pkg/sql/plan_opt.go b/pkg/sql/plan_opt.go index f62303344699..5ae124373194 100644 --- a/pkg/sql/plan_opt.go +++ b/pkg/sql/plan_opt.go @@ -47,10 +47,10 @@ var queryCacheEnabled = settings.RegisterBoolSetting( // prepareUsingOptimizer builds a memo for a prepared statement and populates // the following stmt.Prepared fields: -// - Columns -// - Types -// - AnonymizedStr -// - Memo (for reuse during exec, if appropriate). +// - Columns +// - Types +// - AnonymizedStr +// - Memo (for reuse during exec, if appropriate). func (p *planner) prepareUsingOptimizer(ctx context.Context) (planFlags, error) { stmt := &p.stmt diff --git a/pkg/sql/planhook.go b/pkg/sql/planhook.go index 510f8f283bba..bded72055846 100644 --- a/pkg/sql/planhook.go +++ b/pkg/sql/planhook.go @@ -52,7 +52,7 @@ type planHookFn func( // sends on it when necessary. Any subplans returned by the hook when initially // called are passed back, planned and started, for the RowFn's use. // -//TODO(dt): should this take runParams like a normal planNode.Next? +// TODO(dt): should this take runParams like a normal planNode.Next? type PlanHookRowFn func(context.Context, []planNode, chan<- tree.Datums) error type planHook struct { diff --git a/pkg/sql/project_set.go b/pkg/sql/project_set.go index d9d2f71505ad..f9c3bfbaa082 100644 --- a/pkg/sql/project_set.go +++ b/pkg/sql/project_set.go @@ -24,7 +24,7 @@ import ( // returns tuples of values from a,b,c picked "simultaneously". NULLs // are used when an iterator is "shorter" than another. For example: // -// zip([1,2,3], ['a','b']) = [(1,'a'), (2,'b'), (3, null)] +// zip([1,2,3], ['a','b']) = [(1,'a'), (2,'b'), (3, null)] // // In this context, projectSetNode corresponds to a relational // operator project(R, a, b, c, ...) 
which, for each row in R, diff --git a/pkg/sql/randgen/schema.go b/pkg/sql/randgen/schema.go index 5006c94b4b75..c882dbe8b836 100644 --- a/pkg/sql/randgen/schema.go +++ b/pkg/sql/randgen/schema.go @@ -272,8 +272,8 @@ func generateInsertStmtVals(rng *rand.Rand, colTypes []*types.T, nullable []bool // PopulateTableWithRandData populates the provided table by executing exactly // `numInserts` statements. numRowsInserted <= numInserts because inserting into // an arbitrary table can fail for reasons which include: -// - UNIQUE or CHECK constraint violation. RandDatum is naive to these constraints. -// - Out of range error for a computed INT2 or INT4 column. +// - UNIQUE or CHECK constraint violation. RandDatum is naive to these constraints. +// - Out of range error for a computed INT2 or INT4 column. // // If numRowsInserted == 0, PopulateTableWithRandomData or RandDatum couldn't // handle this table's schema. Consider increasing numInserts or filing a bug. @@ -685,9 +685,9 @@ func nonComputedColumnTableDefs(cols []*tree.ColumnTableDef) []*tree.ColumnTable // // The value types must match the primary key columns (or a prefix of them); // supported types are: - Datum -// - bool (converts to DBool) -// - int (converts to DInt) -// - string (converts to DString) +// - bool (converts to DBool) +// - int (converts to DInt) +// - string (converts to DString) func TestingMakePrimaryIndexKey( desc catalog.TableDescriptor, vals ...interface{}, ) (roachpb.Key, error) { @@ -749,9 +749,9 @@ func TestingMakePrimaryIndexKeyForTenant( // // The value types must match the secondary key columns, // supported types are: - Datum -// - bool (converts to DBool) -// - int (converts to DInt) -// - string (converts to DString) +// - bool (converts to DBool) +// - int (converts to DInt) +// - string (converts to DString) func TestingMakeSecondaryIndexKey( desc catalog.TableDescriptor, index catalog.Index, codec keys.SQLCodec, vals ...interface{}, ) (roachpb.Key, error) { diff --git a/pkg/sql/recursive_cte.go b/pkg/sql/recursive_cte.go index dd06bb0c831c..d27046622b80 100644 --- a/pkg/sql/recursive_cte.go +++ b/pkg/sql/recursive_cte.go @@ -25,9 +25,10 @@ import ( // a "working" table. // 2. So long as the working table is not empty: // * evaluate the recursive query, substituting the current contents of -// the working table for the recursive self-reference; +// the working table for the recursive self-reference; // * emit all resulting rows, and save them as the next iteration's -// working table. +// working table. +// // The recursive query tree is regenerated each time using a callback // (implemented by the execbuilder). type recursiveCTENode struct { diff --git a/pkg/sql/rename_column.go b/pkg/sql/rename_column.go index cd724eebfcc3..9f31d1962dbc 100644 --- a/pkg/sql/rename_column.go +++ b/pkg/sql/rename_column.go @@ -31,8 +31,9 @@ type renameColumnNode struct { // RenameColumn renames the column. // Privileges: CREATE on table. -// notes: postgres requires CREATE on the table. -// mysql requires ALTER, CREATE, INSERT on the table. +// +// notes: postgres requires CREATE on the table. +// mysql requires ALTER, CREATE, INSERT on the table. 
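Stepping back to the recursive_cte.go comment above: the working-table loop it describes (seed rows, then re-evaluate the recursive query against the previous iteration's rows until nothing new is produced) can be sketched in a few lines of plain Go. The callback name evalRecursive and the int rows below are assumptions made for illustration; they stand in for the execbuilder callback and EncDatumRows.

package main

import "fmt"

// runRecursiveCTE emits the initial rows, then repeatedly evaluates the
// recursive part against the current working table until it produces no
// new rows, mirroring the iteration described in the comment.
func runRecursiveCTE(initial []int, evalRecursive func(working []int) []int, emit func(int)) {
	working := append([]int(nil), initial...)
	for _, r := range working {
		emit(r)
	}
	for len(working) > 0 {
		next := evalRecursive(working)
		for _, r := range next {
			emit(r)
		}
		working = next
	}
}

func main() {
	// Roughly: WITH RECURSIVE t(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM t WHERE n < 5).
	runRecursiveCTE(
		[]int{1},
		func(working []int) []int {
			var out []int
			for _, n := range working {
				if n < 5 {
					out = append(out, n+1)
				}
			}
			return out
		},
		func(n int) { fmt.Println(n) },
	)
}

Note that, like the UNION ALL case the comment describes, this sketch does no de-duplication; termination relies on the recursive part eventually returning an empty batch.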
func (p *planner) RenameColumn(ctx context.Context, n *tree.RenameColumn) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/rename_database.go b/pkg/sql/rename_database.go index f92308f4b340..e972cbc9cf95 100644 --- a/pkg/sql/rename_database.go +++ b/pkg/sql/rename_database.go @@ -39,7 +39,8 @@ type renameDatabaseNode struct { // RenameDatabase renames the database. // Privileges: superuser + DROP or ownership + CREATEDB privileges -// Notes: mysql >= 5.1.23 does not allow database renames. +// +// Notes: mysql >= 5.1.23 does not allow database renames. func (p *planner) RenameDatabase(ctx context.Context, n *tree.RenameDatabase) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/rename_index.go b/pkg/sql/rename_index.go index 89e40e2dd1a7..206dfd40b571 100644 --- a/pkg/sql/rename_index.go +++ b/pkg/sql/rename_index.go @@ -32,8 +32,9 @@ type renameIndexNode struct { // RenameIndex renames the index. // Privileges: CREATE on table. -// notes: postgres requires CREATE on the table. -// mysql requires ALTER, CREATE, INSERT on the table. +// +// notes: postgres requires CREATE on the table. +// mysql requires ALTER, CREATE, INSERT on the table. func (p *planner) RenameIndex(ctx context.Context, n *tree.RenameIndex) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index 3f304463c9ca..4e966c3e69a5 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -35,9 +35,10 @@ type renameTableNode struct { // RenameTable renames the table, view or sequence. // Privileges: DROP on source table/view/sequence, CREATE on destination database. -// Notes: postgres requires the table owner. -// mysql requires ALTER, DROP on the original table, and CREATE, INSERT -// on the new table (and does not copy privileges over). +// +// Notes: postgres requires the table owner. +// mysql requires ALTER, DROP on the original table, and CREATE, INSERT +// on the new table (and does not copy privileges over). func (p *planner) RenameTable(ctx context.Context, n *tree.RenameTable) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index 3c3baf25ccb6..68b6b6387362 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -143,20 +143,21 @@ type tableInfo struct { // Fetcher handles fetching kvs and forming table rows for a single table. // Usage: -// var rf Fetcher -// err := rf.Init(..) -// // Handle err -// err := rf.StartScan(..) -// // Handle err -// for { -// res, err := rf.NextRow() -// // Handle err -// if res.row == nil { -// // Done -// break -// } -// // Process res.row -// } +// +// var rf Fetcher +// err := rf.Init(..) +// // Handle err +// err := rf.StartScan(..) +// // Handle err +// for { +// res, err := rf.NextRow() +// // Handle err +// if res.row == nil { +// // Done +// break +// } +// // Process res.row +// } type Fetcher struct { args FetcherInitArgs table tableInfo @@ -398,11 +399,11 @@ func (rf *Fetcher) Init(ctx context.Context, args FetcherInitArgs) error { // the underlying KVBatchFetcher is not a txnKVFetcher. // // This method is designed to support two use cases: -// - providing the Fetcher with the txn when the txn was not available during -// Fetcher.Init; -// - allowing the caller to update the Fetcher to use the new txn. In this case, -// the caller should be careful since reads performed under different txns -// do not provide consistent view of the data. 
+// - providing the Fetcher with the txn when the txn was not available during +// Fetcher.Init; +// - allowing the caller to update the Fetcher to use the new txn. In this case, +// the caller should be careful since reads performed under different txns +// do not provide consistent view of the data. func (rf *Fetcher) SetTxn(txn *kv.Txn) error { sendFn := makeKVBatchFetcherDefaultSendFunc(txn, rf.kvFetcher.atomics.batchRequestsIssued) return rf.setTxnAndSendFn(txn, sendFn) diff --git a/pkg/sql/row/row_converter.go b/pkg/sql/row/row_converter.go index a435bdcfe94d..4a990acee0ee 100644 --- a/pkg/sql/row/row_converter.go +++ b/pkg/sql/row/row_converter.go @@ -78,16 +78,15 @@ func (i KVInserter) InitPut(key, value interface{}, failOnTombstones bool) { // The result is a row tuple providing values for every column in insertCols. // This results contains: // -// - the values provided by rowVals, the tuple of source values. The -// caller ensures this provides values 1-to-1 to the prefix of -// insertCols that was specified explicitly in the INSERT statement. -// - the default values for any additional columns in insertCols that -// have default values in defaultExprs. -// - the computed values for any additional columns in insertCols -// that are computed. The mapping in rowContainerForComputedCols -// maps the indexes of the comptuedCols/computeExpr slices -// back into indexes in the result row tuple. -// +// - the values provided by rowVals, the tuple of source values. The +// caller ensures this provides values 1-to-1 to the prefix of +// insertCols that was specified explicitly in the INSERT statement. +// - the default values for any additional columns in insertCols that +// have default values in defaultExprs. +// - the computed values for any additional columns in insertCols +// that are computed. The mapping in rowContainerForComputedCols +// maps the indexes of the comptuedCols/computeExpr slices +// back into indexes in the result row tuple. func GenerateInsertRow( defaultExprs []tree.TypedExpr, computeExprs []tree.TypedExpr, diff --git a/pkg/sql/row/writer.go b/pkg/sql/row/writer.go index ecacc54fb6b0..0b48284faa61 100644 --- a/pkg/sql/row/writer.go +++ b/pkg/sql/row/writer.go @@ -40,8 +40,8 @@ func ColIDtoRowIndexFromCols(cols []catalog.Column) catalog.TableColMap { // ColMapping returns a map from ordinals in the fromCols list to ordinals in // the toCols list. More precisely, for 0 <= i < fromCols: // -// result[i] = j such that fromCols[i].ID == toCols[j].ID, or -// -1 if the column is not part of toCols. +// result[i] = j such that fromCols[i].ID == toCols[j].ID, or +// -1 if the column is not part of toCols. func ColMapping(fromCols, toCols []catalog.Column) []int { // colMap is a map from ColumnID to ordinal into fromCols. var colMap util.FastIntMap @@ -67,25 +67,25 @@ func ColMapping(fromCols, toCols []catalog.Column) []int { // prepareInsertOrUpdateBatch constructs a KV batch that inserts or // updates a row in KV. -// - batch is the KV batch where commands should be appended. -// - putFn is the functions that can append Put/CPut commands to the batch. -// (must be adapted depending on whether 'overwrite' is set) -// - helper is the rowHelper that knows about the table being modified. -// - primaryIndexKey is the PK prefix for the current row. -// - fetchedCols is the list of schema columns that have been fetched -// in preparation for this update. -// - values is the SQL-level row values that are being written. 
-// - valColIDMapping is the mapping from column IDs into positions of the slice -// values. -// - updatedColIDMapping is the mapping from column IDs into the positions of -// the updated values. -// - kvKey and kvValues must be heap-allocated scratch buffers to write -// roachpb.Key and roachpb.Value values. -// - rawValueBuf must be a scratch byte array. This must be reinitialized -// to an empty slice on each call but can be preserved at its current -// capacity to avoid allocations. The function returns the slice. -// - overwrite must be set to true for UPDATE and UPSERT. -// - traceKV is to be set to log the KV operations added to the batch. +// - batch is the KV batch where commands should be appended. +// - putFn is the functions that can append Put/CPut commands to the batch. +// (must be adapted depending on whether 'overwrite' is set) +// - helper is the rowHelper that knows about the table being modified. +// - primaryIndexKey is the PK prefix for the current row. +// - fetchedCols is the list of schema columns that have been fetched +// in preparation for this update. +// - values is the SQL-level row values that are being written. +// - valColIDMapping is the mapping from column IDs into positions of the slice +// values. +// - updatedColIDMapping is the mapping from column IDs into the positions of +// the updated values. +// - kvKey and kvValues must be heap-allocated scratch buffers to write +// roachpb.Key and roachpb.Value values. +// - rawValueBuf must be a scratch byte array. This must be reinitialized +// to an empty slice on each call but can be preserved at its current +// capacity to avoid allocations. The function returns the slice. +// - overwrite must be set to true for UPDATE and UPSERT. +// - traceKV is to be set to log the KV operations added to the batch. func prepareInsertOrUpdateBatch( ctx context.Context, batch putter, diff --git a/pkg/sql/rowcontainer/disk_row_container.go b/pkg/sql/rowcontainer/disk_row_container.go index 86f5cc31622c..d79479501806 100644 --- a/pkg/sql/rowcontainer/disk_row_container.go +++ b/pkg/sql/rowcontainer/disk_row_container.go @@ -91,10 +91,10 @@ var _ DeDupingRowContainer = &DiskRowContainer{} // MakeDiskRowContainer creates a DiskRowContainer with the given engine as the // underlying store that rows are stored on. // Arguments: -// - diskMonitor is used to monitor this DiskRowContainer's disk usage. -// - types is the schema of rows that will be added to this container. -// - ordering is the output ordering; the order in which rows should be sorted. -// - e is the underlying store that rows are stored on. +// - diskMonitor is used to monitor this DiskRowContainer's disk usage. +// - types is the schema of rows that will be added to this container. +// - ordering is the output ordering; the order in which rows should be sorted. +// - e is the underlying store that rows are stored on. func MakeDiskRowContainer( diskMonitor *mon.BytesMonitor, types []*types.T, @@ -443,7 +443,7 @@ func (d *DiskRowContainer) newIterator(ctx context.Context) diskRowIterator { return diskRowIterator{rowContainer: d, SortedDiskMapIterator: d.diskMap.NewIterator()} } -//NewIterator is part of the SortableRowContainer interface. +// NewIterator is part of the SortableRowContainer interface. 
func (d *DiskRowContainer) NewIterator(ctx context.Context) RowIterator { i := d.newIterator(ctx) if d.topK > 0 { diff --git a/pkg/sql/rowcontainer/numbered_row_container.go b/pkg/sql/rowcontainer/numbered_row_container.go index 626b6727a4a1..d7649a16c1f4 100644 --- a/pkg/sql/rowcontainer/numbered_row_container.go +++ b/pkg/sql/rowcontainer/numbered_row_container.go @@ -52,13 +52,13 @@ type DiskBackedNumberedRowContainer struct { // NewDiskBackedNumberedRowContainer creates a DiskBackedNumberedRowContainer. // // Arguments: -// - deDup is true if it should de-duplicate. -// - types is the schema of rows that will be added to this container. -// - evalCtx defines the context. -// - engine is the underlying store that rows are stored on when the container -// spills to disk. -// - memoryMonitor is used to monitor this container's memory usage. -// - diskMonitor is used to monitor this container's disk usage. +// - deDup is true if it should de-duplicate. +// - types is the schema of rows that will be added to this container. +// - evalCtx defines the context. +// - engine is the underlying store that rows are stored on when the container +// spills to disk. +// - memoryMonitor is used to monitor this container's memory usage. +// - diskMonitor is used to monitor this container's disk usage. func NewDiskBackedNumberedRowContainer( deDup bool, types []*types.T, @@ -249,18 +249,18 @@ func (d *DiskBackedNumberedRowContainer) Close(ctx context.Context) { // the highest reuse distance currently in the cache. This optimality requires // some book-keeping overhead: // -// - A map with O(R) entries where R is the number of unique rows that will be -// accessed and an overall size proportional to the total number of accesses. -// Overall this is within a constant factor of [][]int, but the constant could -// be high. Note that we need this map because when doing Next() on the iterator -// we encounter entries different from the ones that caused this cache miss -// and we need to decide whether to cache them -- if we had a random access -// iterator such that sequential access was the same cost as random -// access, then a single []int with the next reuse position for each access -// would have sufficed. -// - A heap containing the rows in the cache that is updated on each cache hit, -// and whenever a row is evicted or added to the cache. This is O(log N) where -// N is the number of entries in the cache. +// - A map with O(R) entries where R is the number of unique rows that will be +// accessed and an overall size proportional to the total number of accesses. +// Overall this is within a constant factor of [][]int, but the constant could +// be high. Note that we need this map because when doing Next() on the iterator +// we encounter entries different from the ones that caused this cache miss +// and we need to decide whether to cache them -- if we had a random access +// iterator such that sequential access was the same cost as random +// access, then a single []int with the next reuse position for each access +// would have sufficed. +// - A heap containing the rows in the cache that is updated on each cache hit, +// and whenever a row is evicted or added to the cache. This is O(log N) where +// N is the number of entries in the cache. 
// // Overall, this may be too much memory and cpu overhead for not enough // benefit, but it will put an upper bound on what we can achieve with a @@ -273,8 +273,8 @@ func (d *DiskBackedNumberedRowContainer) Close(ctx context.Context) { // cost of a cache miss is high. // // TODO(sumeer): -// - Use some realistic inverted index workloads (including geospatial) to -// measure the effect of this cache. +// - Use some realistic inverted index workloads (including geospatial) to +// measure the effect of this cache. type numberedDiskRowIterator struct { rowIter *numberedRowIterator // After creation, the rowIter is not positioned. isPositioned transitions diff --git a/pkg/sql/rowcontainer/row_container.go b/pkg/sql/rowcontainer/row_container.go index d27d256f861d..ccec3909193b 100644 --- a/pkg/sql/rowcontainer/row_container.go +++ b/pkg/sql/rowcontainer/row_container.go @@ -107,20 +107,20 @@ type DeDupingRowContainer interface { // RowIterator is a simple iterator used to iterate over sqlbase.EncDatumRows. // Example use: -// var i RowIterator -// for i.Rewind(); ; i.Next() { -// if ok, err := i.Valid(); err != nil { -// // Handle error. -// } else if !ok { +// +// var i RowIterator +// for i.Rewind(); ; i.Next() { +// if ok, err := i.Valid(); err != nil { +// // Handle error. +// } else if !ok { // break -// } +// } // row, err := i.Row() // if err != nil { // // Handle error. // } // // Do something. -// } -// +// } type RowIterator interface { // Rewind seeks to the first row. Rewind() @@ -388,16 +388,16 @@ var _ DeDupingRowContainer = &DiskBackedRowContainer{} // Init initializes a DiskBackedRowContainer. // Arguments: -// - ordering is the output ordering; the order in which rows should be sorted. -// - types is the schema of rows that will be added to this container. -// - evalCtx defines the context in which to evaluate comparisons, only used -// when storing rows in memory. -// - engine is the store used for rows when spilling to disk. -// - memoryMonitor is used to monitor the DiskBackedRowContainer's memory usage. -// If this monitor denies an allocation, the DiskBackedRowContainer will -// spill to disk. -// - diskMonitor is used to monitor the DiskBackedRowContainer's disk usage if -// and when it spills to disk. +// - ordering is the output ordering; the order in which rows should be sorted. +// - types is the schema of rows that will be added to this container. +// - evalCtx defines the context in which to evaluate comparisons, only used +// when storing rows in memory. +// - engine is the store used for rows when spilling to disk. +// - memoryMonitor is used to monitor the DiskBackedRowContainer's memory usage. +// If this monitor denies an allocation, the DiskBackedRowContainer will +// spill to disk. +// - diskMonitor is used to monitor the DiskBackedRowContainer's disk usage if +// and when it spills to disk. func (f *DiskBackedRowContainer) Init( ordering colinfo.ColumnOrdering, types []*types.T, @@ -670,14 +670,14 @@ var _ IndexedRowContainer = &DiskBackedIndexedRowContainer{} // with the given engine as the underlying store that rows are stored on when // it spills to disk. // Arguments: -// - ordering is the output ordering; the order in which rows should be sorted. -// - types is the schema of rows that will be added to this container. -// - evalCtx defines the context in which to evaluate comparisons, only used -// when storing rows in memory. -// - engine is the underlying store that rows are stored on when the container -// spills to disk. 
-// - memoryMonitor is used to monitor this container's memory usage. -// - diskMonitor is used to monitor this container's disk usage. +// - ordering is the output ordering; the order in which rows should be sorted. +// - types is the schema of rows that will be added to this container. +// - evalCtx defines the context in which to evaluate comparisons, only used +// when storing rows in memory. +// - engine is the underlying store that rows are stored on when the container +// spills to disk. +// - memoryMonitor is used to monitor this container's memory usage. +// - diskMonitor is used to monitor this container's disk usage. func NewDiskBackedIndexedRowContainer( ordering colinfo.ColumnOrdering, typs []*types.T, diff --git a/pkg/sql/rowenc/encoded_datum.go b/pkg/sql/rowenc/encoded_datum.go index 37fd4ec5008c..48b395ec461e 100644 --- a/pkg/sql/rowenc/encoded_datum.go +++ b/pkg/sql/rowenc/encoded_datum.go @@ -349,9 +349,10 @@ func (ed *EncDatum) Fingerprint( } // Compare returns: -// -1 if the receiver is less than rhs, -// 0 if the receiver is equal to rhs, -// +1 if the receiver is greater than rhs. +// +// -1 if the receiver is less than rhs, +// 0 if the receiver is equal to rhs, +// +1 if the receiver is greater than rhs. func (ed *EncDatum) Compare( typ *types.T, a *tree.DatumAlloc, evalCtx *eval.Context, rhs *EncDatum, ) (int, error) { @@ -493,10 +494,11 @@ func EncDatumRowToDatums( // Compare returns the relative ordering of two EncDatumRows according to a // ColumnOrdering: -// -1 if the receiver comes before the rhs in the ordering, -// +1 if the receiver comes after the rhs in the ordering, -// 0 if the relative order does not matter (i.e. the two rows have the same -// values for the columns in the ordering). +// +// -1 if the receiver comes before the rhs in the ordering, +// +1 if the receiver comes after the rhs in the ordering, +// 0 if the relative order does not matter (i.e. the two rows have the same +// values for the columns in the ordering). // // Note that a return value of 0 does not (in general) imply that the rows are // equal; for example, rows [1 1 5] and [1 1 6] when compared against ordering diff --git a/pkg/sql/rowenc/valueside/doc.go b/pkg/sql/rowenc/valueside/doc.go index 77a6bb5e9ee0..8325efb5f8de 100644 --- a/pkg/sql/rowenc/valueside/doc.go +++ b/pkg/sql/rowenc/valueside/doc.go @@ -16,13 +16,13 @@ // // There are two separate schemes for encoding values: // -// - version 1 (legacy): the original encoding, which supported at most one SQL -// value (column) per roachpb.Value. It is still used for old table -// descriptors that went through many upgrades, and for some system tables. -// Primitives related to this version contain the name `Legacy`. +// - version 1 (legacy): the original encoding, which supported at most one SQL +// value (column) per roachpb.Value. It is still used for old table +// descriptors that went through many upgrades, and for some system tables. +// Primitives related to this version contain the name `Legacy`. // -// - version 2 (column families): the current encoding which supports multiple -// SQL values (columns) per roachpb.Value. +// - version 2 (column families): the current encoding which supports multiple +// SQL values (columns) per roachpb.Value. // // See also: docs/tech-notes/encoding.md. 
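The Compare comments above both define a three-way result (-1, 0, +1) taken over a column ordering. As a rough illustration only, the sketch below applies that contract to plain int rows; orderingColumn and direction are simplified stand-ins, not rowenc's ColumnOrdering or EncDatum types.

package main

import "fmt"

type direction int

const (
	ascending direction = iota
	descending
)

type orderingColumn struct {
	colIdx int
	dir    direction
}

// compareRows returns -1, 0, or +1 following the documented contract:
// the first ordering column on which the rows differ decides the result;
// if none differ, the rows compare as equal for this ordering (which, as
// the comment notes, does not mean the rows are equal).
func compareRows(ordering []orderingColumn, a, b []int) int {
	for _, oc := range ordering {
		cmp := 0
		switch {
		case a[oc.colIdx] < b[oc.colIdx]:
			cmp = -1
		case a[oc.colIdx] > b[oc.colIdx]:
			cmp = 1
		}
		if oc.dir == descending {
			cmp = -cmp
		}
		if cmp != 0 {
			return cmp
		}
	}
	return 0
}

func main() {
	ord := []orderingColumn{{colIdx: 0, dir: ascending}, {colIdx: 1, dir: ascending}}
	// Rows [1 1 5] and [1 1 6] compare as equal under an ordering on the
	// first two columns, matching the example in the comment above.
	fmt.Println(compareRows(ord, []int{1, 1, 5}, []int{1, 1, 6})) // 0
	fmt.Println(compareRows(ord, []int{1, 2, 0}, []int{1, 1, 9})) // 1
}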
package valueside diff --git a/pkg/sql/rowexec/aggregator_test.go b/pkg/sql/rowexec/aggregator_test.go index 9d07299728f1..0cd1ea214de9 100644 --- a/pkg/sql/rowexec/aggregator_test.go +++ b/pkg/sql/rowexec/aggregator_test.go @@ -49,14 +49,15 @@ func aggregations(aggTestSpecs []aggTestSpec) []execinfrapb.AggregatorSpec_Aggre } // TODO(irfansharif): Add tests to verify the following aggregation functions: -// AVG -// BOOL_AND -// BOOL_OR -// CONCAT_AGG -// JSON_AGG -// JSONB_AGG -// STDDEV -// VARIANCE +// +// AVG +// BOOL_AND +// BOOL_OR +// CONCAT_AGG +// JSON_AGG +// JSONB_AGG +// STDDEV +// VARIANCE func TestAggregator(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/rowexec/inverted_expr_evaluator.go b/pkg/sql/rowexec/inverted_expr_evaluator.go index 51e64a1c91c8..15c7ab9601b7 100644 --- a/pkg/sql/rowexec/inverted_expr_evaluator.go +++ b/pkg/sql/rowexec/inverted_expr_evaluator.go @@ -304,15 +304,17 @@ type batchedInvertedExprEvaluator struct { // // Example 1: // pendingSpans contains -// c---g -// c-----i -// c--e +// +// c---g +// c-----i +// c--e // // And fragmentUntil = i. Since end keys are exclusive we can fragment and // remove all spans in pendingSpans. These will be: -// c-e-g -// c-e-g-i -// c-e +// +// c-e-g +// c-e-g-i +// c-e // // For the c-e span, all the exprAndSetIndexList slices for these spans are // appended since any row in that span needs to be routed to all these @@ -324,10 +326,10 @@ type batchedInvertedExprEvaluator struct { // Same pendingSpans, and fragmentUntil = f. The fragments that are generated // for fragmentedSpans and the remaining spans in pendingSpans are: // -// fragments remaining -// c-e-f f-g -// c-e-f f-i -// c-e +// fragments remaining +// c-e-f f-g +// c-e-f f-i +// c-e func (b *batchedInvertedExprEvaluator) fragmentPendingSpans( pendingSpans []invertedSpanRoutingInfo, fragmentUntil inverted.EncVal, ) []invertedSpanRoutingInfo { diff --git a/pkg/sql/rowexec/joinreader.go b/pkg/sql/rowexec/joinreader.go index f7c6a928d7c7..22f6295e2cca 100644 --- a/pkg/sql/rowexec/joinreader.go +++ b/pkg/sql/rowexec/joinreader.go @@ -1299,11 +1299,11 @@ func (jr *joinReader) updateGroupingStateForNonEmptyBatch() { // groups in an input batch, for lookup joins (not used for index // joins). // It functions in one of two modes: -// - doGrouping is false: It is expected that for each input row in -// a batch, addContinuationValForRow(false) will be called. -// - doGrouping is true: The join is functioning in a manner where -// the continuation column in the input indicates the parameter -// value of addContinuationValForRow calls. +// - doGrouping is false: It is expected that for each input row in +// a batch, addContinuationValForRow(false) will be called. +// - doGrouping is true: The join is functioning in a manner where +// the continuation column in the input indicates the parameter +// value of addContinuationValForRow calls. // // The initialization and resetting of state for a batch is // handled by joinReader. Updates to this state based on row diff --git a/pkg/sql/rowexec/joinreader_span_generator.go b/pkg/sql/rowexec/joinreader_span_generator.go index 4250ac68309e..c46bb329c451 100644 --- a/pkg/sql/rowexec/joinreader_span_generator.go +++ b/pkg/sql/rowexec/joinreader_span_generator.go @@ -285,9 +285,10 @@ func (g *defaultSpanGenerator) close(ctx context.Context) { // columns can take on multiple constant values. 
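The fragmentPendingSpans comment in the inverted_expr_evaluator.go hunk above works through two examples of splitting the pending spans c-g, c-i, c-e at a fragmentUntil key. Below is a minimal sketch of just that splitting step on string keys; the span type and fragmentPending helper are illustrative names, and the routing-info bookkeeping (the exprAndSetIndexList handling) done by the real evaluator is omitted.

package main

import (
	"fmt"
	"sort"
)

// span is a simplified [Start, End) key span standing in for
// invertedSpanRoutingInfo; end keys are exclusive, as in the comment.
type span struct{ Start, End string }

// fragmentPending splits every pending span at each distinct end key below
// fragmentUntil (and at fragmentUntil itself), returning the fully fragmented
// pieces and the pieces that remain pending past fragmentUntil.
func fragmentPending(pending []span, fragmentUntil string) (fragments, remaining []span) {
	splits := map[string]struct{}{fragmentUntil: {}}
	for _, s := range pending {
		if s.End < fragmentUntil {
			splits[s.End] = struct{}{}
		}
	}
	var keys []string
	for k := range splits {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, s := range pending {
		start := s.Start
		for _, k := range keys {
			if k <= start || k >= s.End {
				continue
			}
			fragments = append(fragments, span{start, k})
			start = k
		}
		if s.End <= fragmentUntil {
			fragments = append(fragments, span{start, s.End})
		} else {
			remaining = append(remaining, span{start, s.End})
		}
	}
	return fragments, remaining
}

func main() {
	pending := []span{{"c", "g"}, {"c", "i"}, {"c", "e"}}
	// Example 2 from the comment: fragmentUntil = f.
	frags, rem := fragmentPending(pending, "f")
	fmt.Println(frags) // [{c e} {e f} {c e} {e f} {c e}]
	fmt.Println(rem)   // [{f g} {f i}]
}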
For example, the // multiSpanGenerator would be used for a left lookup join in the following // case: -// - The index has key columns (region, id) -// - The input columns are (a, b, c) -// - The join condition is region IN ('east', 'west') AND id = a +// - The index has key columns (region, id) +// - The input columns are (a, b, c) +// - The join condition is region IN ('east', 'west') AND id = a +// // In this case, the multiSpanGenerator would generate two spans for each input // row: [/'east'/ - /'east'/] [/'west'/ - /'west'/]. type multiSpanGenerator struct { diff --git a/pkg/sql/rowexec/joinreader_strategies.go b/pkg/sql/rowexec/joinreader_strategies.go index b6fc36d71e86..657dfdcd9861 100644 --- a/pkg/sql/rowexec/joinreader_strategies.go +++ b/pkg/sql/rowexec/joinreader_strategies.go @@ -38,16 +38,16 @@ import ( // rows from pairs of joined rows. // // There are three implementations of joinReaderStrategy: -// - joinReaderNoOrderingStrategy: used when the joined rows do not need to be -// produced in input-row order. -// - joinReaderOrderingStrategy: used when the joined rows need to be produced -// in input-row order. As opposed to the prior strategy, this one needs to do -// more buffering to deal with out-of-order looked-up rows. -// - joinReaderIndexJoinStrategy: used when we're performing a join between an -// index and the table's PK. This one is the simplest and the most efficient -// because it doesn't actually join anything - it directly emits the PK rows. -// The joinReaderIndexJoinStrategy is used by both ordered and unordered index -// joins; see comments on joinReaderIndexJoinStrategy for details. +// - joinReaderNoOrderingStrategy: used when the joined rows do not need to be +// produced in input-row order. +// - joinReaderOrderingStrategy: used when the joined rows need to be produced +// in input-row order. As opposed to the prior strategy, this one needs to do +// more buffering to deal with out-of-order looked-up rows. +// - joinReaderIndexJoinStrategy: used when we're performing a join between an +// index and the table's PK. This one is the simplest and the most efficient +// because it doesn't actually join anything - it directly emits the PK rows. +// The joinReaderIndexJoinStrategy is used by both ordered and unordered index +// joins; see comments on joinReaderIndexJoinStrategy for details. type joinReaderStrategy interface { // getLookupRowsBatchSizeHint returns the size in bytes of the batch of lookup // rows. @@ -100,10 +100,10 @@ type joinReaderStrategy interface { // more performant than joinReaderOrderingStrategy. // // Consider the following example: -// - the input side has rows (1, red), (2, blue), (3, blue), (4, red). -// - the lookup side has rows (red, x), (blue, y). -// - the join needs to produce the pairs (1, x), (2, y), (3, y), (4, x), in any -// order. +// - the input side has rows (1, red), (2, blue), (3, blue), (4, red). +// - the lookup side has rows (red, x), (blue, y). +// - the join needs to produce the pairs (1, x), (2, y), (3, y), (4, x), in any +// order. // // Say the joinReader looks up rows in order: (red, x), then (blue, y). Once // (red, x) is fetched, it is handed to @@ -460,10 +460,10 @@ var partialJoinSentinel = []int{-1} // of the rows passed to processLookupRows(). // // Consider the following example: -// - the input side has rows (1, red), (2, blue), (3, blue), (4, red). -// - the lookup side has rows (red, x), (blue, y). -// - the join needs to produce the pairs (1, x), (2, y), (3, y), (4, x), in this -// order. 
+// - the input side has rows (1, red), (2, blue), (3, blue), (4, red). +// - the lookup side has rows (red, x), (blue, y). +// - the join needs to produce the pairs (1, x), (2, y), (3, y), (4, x), in this +// order. // // Say the joinReader looks up rows in order: (red, x), then (blue, y). Once // (red, x) is fetched, it is handed to diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index 28ca3214cdc7..fc797bc725bb 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -1641,17 +1641,22 @@ func BenchmarkJoinReader(b *testing.B) { // // input: 0,1,2,3,4 (size of input is 'numLookupRows') // table: one | four | sixteen | -// 0 | 0 | 0 -// 1 | 0 | 0 -// 2 | 0 | 0 -// 3 | 0 | 0 -// 4 | 1 | 0 -// 5 | 1 | 0 -// ... +// +// 0 | 0 | 0 +// 1 | 0 | 0 +// 2 | 0 | 0 +// 3 | 0 | 0 +// 4 | 1 | 0 +// 5 | 1 | 0 +// ... +// // SELECT one FROM input INNER LOOKUP JOIN t64 ON i = one; -// -> 0,1,2,3,4 +// +// -> 0,1,2,3,4 +// // SELECT four FROM input INNER LOOKUP JOIN t64 ON i = four; -// -> 0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3 +// +// -> 0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3 func benchmarkJoinReader(b *testing.B, bc JRBenchConfig) { // Create an *on-disk* store spec for the primary store and temp engine to diff --git a/pkg/sql/rowexec/utils_test.go b/pkg/sql/rowexec/utils_test.go index e08f7a7a25e9..665974a76079 100644 --- a/pkg/sql/rowexec/utils_test.go +++ b/pkg/sql/rowexec/utils_test.go @@ -194,7 +194,8 @@ func (r *rowDisposer) NumRowsDisposed() int { // makeFetchSpec creates an IndexFetchSpec for the given index, with the columns // specified by name, separated by a comma. For example: -// makeFetchSpec(t, table, "idx_c", "a,b,c") +// +// makeFetchSpec(t, table, "idx_c", "a,b,c") func makeFetchSpec( t testing.TB, table catalog.TableDescriptor, indexName string, colNames string, ) descpb.IndexFetchSpec { diff --git a/pkg/sql/rowexec/zigzagjoiner.go b/pkg/sql/rowexec/zigzagjoiner.go index 974187c662f3..fdeaeb43781e 100644 --- a/pkg/sql/rowexec/zigzagjoiner.go +++ b/pkg/sql/rowexec/zigzagjoiner.go @@ -44,7 +44,6 @@ import ( // // SELECT * FROM abcd@c_idx WHERE c = 2 AND d = 3; // -// // Without a zigzag joiner, this query would previously execute: index scan on // `c_idx`, followed by an index join on the primary index, then filter out rows // where `d ≠ 3`. @@ -79,23 +78,28 @@ import ( // // The actual execution can be visualized below : // -// c_idx d_idx +// c_idx d_idx +// // c | a, b d | a, b // ============= ============ // --> 2 1 1 ----> 3 1 1 ---+ X -// | +// +// | +// // +----------------- 3 4 2 <--+ // | 3 4 3 // | 3 5 6 // | 3 7 2 // +--> 2 8 2 -------------------+ -// | +// +// | +// // +----------------- 3 8 3 ----+ // | // +-> 2 9 3 -----> 3 9 3 --+ X -// | -// nil (Done) <--+ // +// | +// nil (Done) <--+ // // - The execution starts by fetching the (2, 1, 1) row from c_idx. This is the // first row fetched when an index lookup in `c_idx` where `c = 2`. Let this be @@ -127,7 +131,6 @@ import ( // - We are done when the index lookup returns `nil`. There were no more rows in // this index that could satisfy the join. // -// // When Can a Zigzag Join Be Planned: // // Every side of a zigzag join has fixed columns, equality columns, and index @@ -151,7 +154,7 @@ import ( // // For a description of index columns, refer to Appendix A. 
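The zigzag joiner walkthrough above alternates between c_idx (c = 2) and d_idx (d = 3), seeking each side to the other's current primary key and emitting the keys found on both sides, which for the example data are (a=1, b=1) and (a=9, b=3). The following is a self-contained sketch of that intersection idea on the example's (a, b) keys; pk, seekGE, and zigzag are illustrative names, not the rowexec implementation.

package main

import "fmt"

// pk is a simplified (a, b) primary key, standing in for the index rows in
// the zigzag-join example above.
type pk struct{ a, b int }

func less(x, y pk) bool { return x.a < y.a || (x.a == y.a && x.b < y.b) }

// seekGE returns the index of the first element in s at position >= i that is
// >= target, mimicking an index lookup that starts at a given key.
func seekGE(s []pk, i int, target pk) int {
	for i < len(s) && less(s[i], target) {
		i++
	}
	return i
}

// zigzag alternately seeks each side to the other side's current key and
// emits the keys present in both, skipping over runs of non-matching rows.
func zigzag(left, right []pk) []pk {
	var out []pk
	i, j := 0, 0
	for i < len(left) && j < len(right) {
		switch {
		case left[i] == right[j]:
			out = append(out, left[i])
			i, j = i+1, j+1
		case less(left[i], right[j]):
			i = seekGE(left, i, right[j])
		default:
			j = seekGE(right, j, left[i])
		}
	}
	return out
}

func main() {
	cIdx := []pk{{1, 1}, {8, 2}, {9, 3}}                                 // (a, b) for rows with c = 2
	dIdx := []pk{{1, 1}, {4, 2}, {4, 3}, {5, 6}, {7, 2}, {8, 3}, {9, 3}} // (a, b) for rows with d = 3
	fmt.Println(zigzag(cIdx, dIdx)) // [{1 1} {9 3}]
}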
// -// Additional Cases +// # Additional Cases // // Normal Joins // This algorithm can also be applied to normal joins such as: @@ -173,7 +176,7 @@ import ( // - Index: `abcd@primary` // - Equality columns: (a) // - Fixed columns: None -//- Fixed values: None +// - Fixed values: None // // Note: If the query were to `SELECT *` instead of `SELECT a, b` a further // index join would be needed, but this index join would only be applied on the @@ -192,7 +195,6 @@ import ( // that a zigzag join’s utility will increase as the number of sides increases // because more rows will be able to be skipped. // -// // Appendix A: Indexes // // The zigzag joins makes use of multiple indexes. Each index is composed of a diff --git a/pkg/sql/scan_test.go b/pkg/sql/scan_test.go index b8cd7acf02ee..bd8b42c4efae 100644 --- a/pkg/sql/scan_test.go +++ b/pkg/sql/scan_test.go @@ -45,7 +45,9 @@ func genValues(num, valRange int) []int { } // testScanBatchQuery runs a query of the form -// SELECT a,B FROM test.scan WHERE a IN (1,5,3..) AND b >= 5 AND b <= 10 +// +// SELECT a,B FROM test.scan WHERE a IN (1,5,3..) AND b >= 5 AND b <= 10 +// // numSpans controls the number of possible values for a. func testScanBatchQuery(t *testing.T, db *gosql.DB, numSpans, numAs, numBs int, reverse bool) { // Generate numSpans values for A diff --git a/pkg/sql/schema_resolver.go b/pkg/sql/schema_resolver.go index 5d7902e9a5bd..e70d5c326b9d 100644 --- a/pkg/sql/schema_resolver.go +++ b/pkg/sql/schema_resolver.go @@ -329,9 +329,11 @@ func (sr *schemaResolver) canResolveDescUnderSchema( // // var someVar T // var err error -// p.runWithOptions(resolveFlags{skipCache: true}, func() { -// someVar, err = ResolveExistingTableObject(ctx, p, ...) -// }) +// +// p.runWithOptions(resolveFlags{skipCache: true}, func() { +// someVar, err = ResolveExistingTableObject(ctx, p, ...) +// }) +// // if err != nil { ... } // use(someVar) func (sr *schemaResolver) runWithOptions(flags resolveFlags, fn func()) { diff --git a/pkg/sql/schemachanger/rel/doc.go b/pkg/sql/schemachanger/rel/doc.go index 6149de57887c..6375c011e7db 100644 --- a/pkg/sql/schemachanger/rel/doc.go +++ b/pkg/sql/schemachanger/rel/doc.go @@ -16,15 +16,14 @@ // index these entities (struct pointers) by those attributes. These entities // can be queried using a declarative query language exposed by the library. // -// Why rel +// # Why rel // // The key motivations end up being explainability, maintainability, and // observability. There's a runtime consideration we'll get to later, but // a driving motivation here is the ability to present domain complexity // outside the considerations of runtime performance and imperative code. // -// -// Uniformity dealing with heterogeneous data +// # Uniformity dealing with heterogeneous data // // The schema is fundamentally full of heterogeneous data with a number of cross // references. Having a uniform mechanism to describe and interact with this @@ -38,19 +37,19 @@ // // Design Goals // -// * Observable: The library's rules should be trivially serialized in -// an easy to consume format. -// * Declarative: The library should separate the logic of the domain from -// the process of the evaluation. -// * Ergonomic: The library should feel comfortable to use for go programmers -// looking to model relational graph problems for which it was intended. 
-// * Reasonable efficiency: The library should provide mechanisms to index -// data to accelerate queries out of band such that the big-O runtime can -// be made sane and the constant overheads aren't too many orders of -// magnitude off of specialized imperative code as to make its use -// unacceptable. +// - Observable: The library's rules should be trivially serialized in +// an easy to consume format. +// - Declarative: The library should separate the logic of the domain from +// the process of the evaluation. +// - Ergonomic: The library should feel comfortable to use for go programmers +// looking to model relational graph problems for which it was intended. +// - Reasonable efficiency: The library should provide mechanisms to index +// data to accelerate queries out of band such that the big-O runtime can +// be made sane and the constant overheads aren't too many orders of +// magnitude off of specialized imperative code as to make its use +// unacceptable. // -// Terminology +// # Terminology // // The basic design of the library is that we want to index and find tuples of // struct pointers, which are termed entities. These entities have attribute @@ -86,7 +85,7 @@ // less elegant) than datomic. The language does not permit any recursion or // runtime creation of facts. // -// Query Language +// # Query Language // // The query language provides a mechanism to reason relationally about data // stored in regular structs which may themselves have hierarchy between them. @@ -100,7 +99,7 @@ // libraries can generate queries of an arbitrary depth. Furthermore, users // can implement their own forms of recursion. // -// Runtime considerations +// # Runtime considerations // // An early primary motivation for this package was the relatively // straightforward problem of determining the set of dependency edges which @@ -154,38 +153,34 @@ // indexes in O(N*log(N)) per statement meaning at worst O(N^2 log(N)) which is // acceptable for an N of ~1000 as opposed to O(N^3) which isn't really. // -// -// Future work +// # Future work // // Below find a listing of features not yet done. // -// * Arrays, Maps, Slices. -// - It would be nice to have a mechanism to talk about decomposing -// data stored in these collections. One approach would be to define -// some more system attributes and some exported structs which act to -// bind a slice member to the entity holding the slice. Consider: -// -// type SliceMember struct { -// Index int -// SourceAttr Attr -// Source interface{} -// Value interface{} -// } -// -// * More ergonomic iteration with reflection. -// - The current Result interface requires type assertions. -// Given we know about the types of the variables, we could -// accept a function with an appropriate signature and use -// reflection to make sure that the corresponding variables -// are properly typed. -// * Variable bindings. -// - If we wanted to make recursion more sane, it'd be better to plan a -// query with some input parameters and then be able to invoke it on those -// parameters. In that way, we could imagine invoking a query recursively. -// * More generalized disjunction. +// - Arrays, Maps, Slices. +// +// - It would be nice to have a mechanism to talk about decomposing +// data stored in these collections. One approach would be to define +// some more system attributes and some exported structs which act to +// bind a slice member to the entity holding the slice. Consider: +// +// - More ergonomic iteration with reflection. 
+// +// - The current Result interface requires type assertions. +// Given we know about the types of the variables, we could +// accept a function with an appropriate signature and use +// reflection to make sure that the corresponding variables +// are properly typed. +// +// - Variable bindings. +// +// - If we wanted to make recursion more sane, it'd be better to plan a +// query with some input parameters and then be able to invoke it on those +// parameters. In that way, we could imagine invoking a query recursively. +// +// - More generalized disjunction. // // TODO(ajwerner): Note that arrays of bytes can probably be used as slice but // that would probably be unfortunate. We'd probably prefer to shove them into // a string using some unsafe magic. -// package rel diff --git a/pkg/sql/schemachanger/scexec/executor_external_test.go b/pkg/sql/schemachanger/scexec/executor_external_test.go index ddd62d4194e5..f402a4ccaec9 100644 --- a/pkg/sql/schemachanger/scexec/executor_external_test.go +++ b/pkg/sql/schemachanger/scexec/executor_external_test.go @@ -523,14 +523,14 @@ func (noopMetadataUpdater) DeleteAllCommentsForTables(ids catalog.DescriptorIDSe return nil } -//UpsertConstraintComment implements scexec.DescriptorMetadataUpdater. +// UpsertConstraintComment implements scexec.DescriptorMetadataUpdater. func (noopMetadataUpdater) UpsertConstraintComment( tableID descpb.ID, constraintID descpb.ConstraintID, comment string, ) error { return nil } -//DeleteConstraintComment implements scexec.DescriptorMetadataUpdater. +// DeleteConstraintComment implements scexec.DescriptorMetadataUpdater. func (noopMetadataUpdater) DeleteConstraintComment( tableID descpb.ID, constraintID descpb.ConstraintID, ) error { diff --git a/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go b/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go index 0b9ca70f7c08..f426e7290e4b 100644 --- a/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go +++ b/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go @@ -70,10 +70,10 @@ func checkSimpleDependentsReferenceDescID(e scpb.Element) error { // Assert that elements can be grouped into three categories when transitioning // from PUBLIC to ABSENT: -// - go via DROPPED iff they're descriptor elements -// - go via a non-read status iff they're indexes or columns, which are -// subject to the two-version invariant. -// - go direct to ABSENT in all other cases. +// - go via DROPPED iff they're descriptor elements +// - go via a non-read status iff they're indexes or columns, which are +// subject to the two-version invariant. +// - go direct to ABSENT in all other cases. func checkToAbsentCategories(e scpb.Element) error { s0 := opgen.InitialStatus(e, scpb.Status_ABSENT) s1 := opgen.NextStatus(e, scpb.Status_ABSENT, s0) diff --git a/pkg/sql/schemachanger/scplan/internal/rules/dep_drop_object.go b/pkg/sql/schemachanger/scplan/internal/rules/dep_drop_object.go index 71218ce6c5cb..c82b13b76f5a 100644 --- a/pkg/sql/schemachanger/scplan/internal/rules/dep_drop_object.go +++ b/pkg/sql/schemachanger/scplan/internal/rules/dep_drop_object.go @@ -18,18 +18,18 @@ import ( ) // These rules ensure that: -// - a descriptor reaches the TXN_DROPPED state in the statement phase, and -// it does not reach DROPPED until the pre-commit phase. -// - a descriptor reaches ABSENT in a different transaction than it reaches -// DROPPED (i.e. it cannot be removed until PostCommit). 
-// - a descriptor element reaches the DROPPED state in the txn before -// its dependent elements (namespace entry, comments, column names, etc) reach -// the ABSENT state; -// - for those dependent elements which have to wait post-commit to reach the -// ABSENT state, we tie them to the same stage as when the descriptor element -// reaches the ABSENT state, but afterwards in the stage, so as to not -// interfere with the event logging op which is tied to the descriptor element -// removal. +// - a descriptor reaches the TXN_DROPPED state in the statement phase, and +// it does not reach DROPPED until the pre-commit phase. +// - a descriptor reaches ABSENT in a different transaction than it reaches +// DROPPED (i.e. it cannot be removed until PostCommit). +// - a descriptor element reaches the DROPPED state in the txn before +// its dependent elements (namespace entry, comments, column names, etc) reach +// the ABSENT state; +// - for those dependent elements which have to wait post-commit to reach the +// ABSENT state, we tie them to the same stage as when the descriptor element +// reaches the ABSENT state, but afterwards in the stage, so as to not +// interfere with the event logging op which is tied to the descriptor element +// removal. func init() { registerDepRule( diff --git a/pkg/sql/schemachanger/scplan/internal/rules/registry.go b/pkg/sql/schemachanger/scplan/internal/rules/registry.go index 4010bb579366..ee77102b54ec 100644 --- a/pkg/sql/schemachanger/scplan/internal/rules/registry.go +++ b/pkg/sql/schemachanger/scplan/internal/rules/registry.go @@ -9,8 +9,8 @@ // licenses/APL.txt. // Package rules contains rules to: -// - generate dependency edges for a graph which contains op edges, -// - mark certain op-edges as no-op. +// - generate dependency edges for a graph which contains op edges, +// - mark certain op-edges as no-op. 
package rules import ( diff --git a/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules b/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules index 5ca9da05c0f8..fe2e83a2e4bd 100644 --- a/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules +++ b/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules @@ -1338,8 +1338,9 @@ deprules - $column[Type] = '*scpb.Column' - $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn'] - joinOnColumnID($column, $dependent, $table-id, $col-id) - - transient($column-target, $dependent-target) - - $column-node[CurrentStatus] = TRANSIENT_WRITE_ONLY + - $column-target[TargetStatus] = ABSENT + - $column-node[CurrentStatus] = WRITE_ONLY + - $dependent-target[TargetStatus] = TRANSIENT_ABSENT - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($column, $column-target, $column-node) - joinTargetNode($dependent, $dependent-target, $dependent-node) @@ -1365,9 +1366,8 @@ deprules - $column[Type] = '*scpb.Column' - $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn'] - joinOnColumnID($column, $dependent, $table-id, $col-id) - - $column-target[TargetStatus] = ABSENT - - $column-node[CurrentStatus] = WRITE_ONLY - - $dependent-target[TargetStatus] = TRANSIENT_ABSENT + - transient($column-target, $dependent-target) + - $column-node[CurrentStatus] = TRANSIENT_WRITE_ONLY - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($column, $column-target, $column-node) - joinTargetNode($dependent, $dependent-target, $dependent-node) @@ -1404,25 +1404,11 @@ deprules to: constraint-node query: - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] - - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - - toAbsent($dependent-target, $constraint-target) - - $dependent-node[CurrentStatus] = ABSENT - - $constraint-node[CurrentStatus] = ABSENT - - joinTargetNode($dependent, $dependent-target, $dependent-node) - - joinTargetNode($constraint, $constraint-target, $constraint-node) -- name: constraint dependent absent right before constraint - from: dependent-node - kind: SameStagePrecedence - to: constraint-node - query: - - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] + - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - - $dependent-target[TargetStatus] = TRANSIENT_ABSENT - - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - - $constraint-target[TargetStatus] = ABSENT - - $constraint-node[CurrentStatus] = ABSENT + - transient($dependent-target, $constraint-target) + - $dependent-node[CurrentStatus] = TRANSIENT_VALIDATED + - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($constraint, $constraint-target, $constraint-node) - name: constraint dependent absent right before constraint @@ -1431,10 +1417,10 @@ deprules to: constraint-node query: - 
$dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] + - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - $dependent-target[TargetStatus] = ABSENT - - $dependent-node[CurrentStatus] = ABSENT + - $dependent-node[CurrentStatus] = VALIDATED - $constraint-target[TargetStatus] = TRANSIENT_ABSENT - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) @@ -1445,10 +1431,10 @@ deprules to: constraint-node query: - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] + - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - toAbsent($dependent-target, $constraint-target) - - $dependent-node[CurrentStatus] = VALIDATED + - $dependent-node[CurrentStatus] = ABSENT - $constraint-node[CurrentStatus] = ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($constraint, $constraint-target, $constraint-node) @@ -1458,10 +1444,10 @@ deprules to: constraint-node query: - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] + - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - transient($dependent-target, $constraint-target) - - $dependent-node[CurrentStatus] = TRANSIENT_VALIDATED + - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($constraint, $constraint-target, $constraint-node) @@ -1471,10 +1457,10 @@ deprules to: constraint-node query: - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] + - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - $dependent-target[TargetStatus] = TRANSIENT_ABSENT - - $dependent-node[CurrentStatus] = TRANSIENT_VALIDATED + - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - $constraint-target[TargetStatus] = ABSENT - $constraint-node[CurrentStatus] = ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) @@ -1485,10 +1471,10 @@ deprules to: constraint-node query: - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] + - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - $dependent-target[TargetStatus] = ABSENT - - $dependent-node[CurrentStatus] = VALIDATED + - $dependent-node[CurrentStatus] = ABSENT - $constraint-target[TargetStatus] = TRANSIENT_ABSENT - 
$constraint-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) @@ -1499,11 +1485,25 @@ deprules to: constraint-node query: - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] - - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint'] + - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) - - transient($dependent-target, $constraint-target) - - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT + - toAbsent($dependent-target, $constraint-target) + - $dependent-node[CurrentStatus] = VALIDATED + - $constraint-node[CurrentStatus] = ABSENT + - joinTargetNode($dependent, $dependent-target, $dependent-node) + - joinTargetNode($constraint, $constraint-target, $constraint-node) +- name: constraint dependent absent right before constraint + from: dependent-node + kind: SameStagePrecedence + to: constraint-node + query: + - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment'] + - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] + - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id) + - $dependent-target[TargetStatus] = TRANSIENT_ABSENT + - $dependent-node[CurrentStatus] = TRANSIENT_VALIDATED + - $constraint-target[TargetStatus] = ABSENT + - $constraint-node[CurrentStatus] = ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($constraint, $constraint-target, $constraint-node) - name: constraint dependent public right before constraint @@ -1527,9 +1527,8 @@ deprules - $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn'] - $column[Type] = '*scpb.Column' - joinOnColumnID($dependent, $column, $table-id, $col-id) - - $dependent-target[TargetStatus] = TRANSIENT_ABSENT - - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - - $column-target[TargetStatus] = ABSENT + - toAbsent($dependent-target, $column-target) + - $dependent-node[CurrentStatus] = ABSENT - $column-node[CurrentStatus] = ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($column, $column-target, $column-node) @@ -1541,9 +1540,9 @@ deprules - $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn'] - $column[Type] = '*scpb.Column' - joinOnColumnID($dependent, $column, $table-id, $col-id) - - toAbsent($dependent-target, $column-target) - - $dependent-node[CurrentStatus] = ABSENT - - $column-node[CurrentStatus] = ABSENT + - transient($dependent-target, $column-target) + - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT + - $column-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($column, $column-target, $column-node) - name: dependents removed before column @@ -1554,10 +1553,10 @@ deprules - $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn'] - $column[Type] = '*scpb.Column' - joinOnColumnID($dependent, 
$column, $table-id, $col-id) - - $dependent-target[TargetStatus] = ABSENT - - $dependent-node[CurrentStatus] = ABSENT - - $column-target[TargetStatus] = TRANSIENT_ABSENT - - $column-node[CurrentStatus] = TRANSIENT_ABSENT + - $dependent-target[TargetStatus] = TRANSIENT_ABSENT + - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT + - $column-target[TargetStatus] = ABSENT + - $column-node[CurrentStatus] = ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($column, $column-target, $column-node) - name: dependents removed before column @@ -1568,8 +1567,9 @@ deprules - $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn'] - $column[Type] = '*scpb.Column' - joinOnColumnID($dependent, $column, $table-id, $col-id) - - transient($dependent-target, $column-target) - - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT + - $dependent-target[TargetStatus] = ABSENT + - $dependent-node[CurrentStatus] = ABSENT + - $column-target[TargetStatus] = TRANSIENT_ABSENT - $column-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($column, $column-target, $column-node) @@ -1581,9 +1581,8 @@ deprules - $dependent[Type] IN ['*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.SecondaryIndexPartial', '*scpb.IndexComment', '*scpb.IndexColumn'] - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnIndexID($dependent, $index, $table-id, $index-id) - - $dependent-target[TargetStatus] = ABSENT - - $dependent-node[CurrentStatus] = ABSENT - - $index-target[TargetStatus] = TRANSIENT_ABSENT + - transient($dependent-target, $index-target) + - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - $index-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($index, $index-target, $index-node) @@ -1595,9 +1594,9 @@ deprules - $dependent[Type] IN ['*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.SecondaryIndexPartial', '*scpb.IndexComment', '*scpb.IndexColumn'] - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnIndexID($dependent, $index, $table-id, $index-id) - - transient($dependent-target, $index-target) - - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - - $index-node[CurrentStatus] = TRANSIENT_ABSENT + - toAbsent($dependent-target, $index-target) + - $dependent-node[CurrentStatus] = ABSENT + - $index-node[CurrentStatus] = ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($index, $index-target, $index-node) - name: dependents removed before index @@ -1622,9 +1621,10 @@ deprules - $dependent[Type] IN ['*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.SecondaryIndexPartial', '*scpb.IndexComment', '*scpb.IndexColumn'] - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnIndexID($dependent, $index, $table-id, $index-id) - - toAbsent($dependent-target, $index-target) + - $dependent-target[TargetStatus] = ABSENT - $dependent-node[CurrentStatus] = ABSENT - - $index-node[CurrentStatus] = ABSENT + - $index-target[TargetStatus] = TRANSIENT_ABSENT + - $index-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($dependent, $dependent-target, $dependent-node) - joinTargetNode($index, $index-target, $index-node) - name: descriptor DROPPED in 
transaction before removal @@ -1806,10 +1806,10 @@ deprules - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - $dependent[Type] IN ['*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.SecondaryIndexPartial', '*scpb.IndexComment', '*scpb.IndexColumn'] - joinOnIndexID($index, $dependent, $table-id, $index-id) - - $index-target[TargetStatus] = TRANSIENT_ABSENT - - $index-node[CurrentStatus] = TRANSIENT_VALIDATED - - $dependent-target[TargetStatus] = ABSENT - - $dependent-node[CurrentStatus] = ABSENT + - $index-target[TargetStatus] = ABSENT + - $index-node[CurrentStatus] = VALIDATED + - $dependent-target[TargetStatus] = TRANSIENT_ABSENT + - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($dependent, $dependent-target, $dependent-node) - name: index no longer public before dependents @@ -1820,10 +1820,10 @@ deprules - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - $dependent[Type] IN ['*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.SecondaryIndexPartial', '*scpb.IndexComment', '*scpb.IndexColumn'] - joinOnIndexID($index, $dependent, $table-id, $index-id) - - $index-target[TargetStatus] = ABSENT - - $index-node[CurrentStatus] = VALIDATED - - $dependent-target[TargetStatus] = TRANSIENT_ABSENT - - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT + - $index-target[TargetStatus] = TRANSIENT_ABSENT + - $index-node[CurrentStatus] = TRANSIENT_VALIDATED + - $dependent-target[TargetStatus] = ABSENT + - $dependent-node[CurrentStatus] = ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($dependent, $dependent-target, $dependent-node) - name: index-column added to index before index is backfilled @@ -1864,10 +1864,10 @@ deprules - joinOnColumnID($index-column, $column, $table-id, $column-id) - joinOnColumnID($index-column, $column-type, $table-id, $column-id) - relationIsNotBeingDropped(*scpb.ColumnType)($column-type) - - $index-target[TargetStatus] = ABSENT - - $index-node[CurrentStatus] = ABSENT - - $column-target[TargetStatus] = TRANSIENT_ABSENT - - $column-node[CurrentStatus] = TRANSIENT_ABSENT + - $index-target[TargetStatus] = TRANSIENT_ABSENT + - $index-node[CurrentStatus] = TRANSIENT_ABSENT + - $column-target[TargetStatus] = ABSENT + - $column-node[CurrentStatus] = ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($column, $column-target, $column-node) - name: indexes containing column reach absent before column @@ -1882,10 +1882,10 @@ deprules - joinOnColumnID($index-column, $column, $table-id, $column-id) - joinOnColumnID($index-column, $column-type, $table-id, $column-id) - relationIsNotBeingDropped(*scpb.ColumnType)($column-type) - - $index-target[TargetStatus] = TRANSIENT_ABSENT - - $index-node[CurrentStatus] = TRANSIENT_ABSENT - - $column-target[TargetStatus] = ABSENT - - $column-node[CurrentStatus] = ABSENT + - $index-target[TargetStatus] = ABSENT + - $index-node[CurrentStatus] = ABSENT + - $column-target[TargetStatus] = TRANSIENT_ABSENT + - $column-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($column, $column-target, $column-node) - name: indexes containing column reach absent before column @@ -1951,10 +1951,9 @@ deprules - $index[Type] = '*scpb.SecondaryIndex' - joinOnIndexID($partial-predicate, $index, $table-id, $index-id) - relationIsNotBeingDropped(*scpb.SecondaryIndexPartial)($partial-predicate) - - 
$partial-predicate-target[TargetStatus] = TRANSIENT_ABSENT + - transient($partial-predicate-target, $index-target) - $partial-predicate-node[CurrentStatus] = TRANSIENT_ABSENT - - $index-target[TargetStatus] = ABSENT - - $index-node[CurrentStatus] = ABSENT + - $index-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($partial-predicate, $partial-predicate-target, $partial-predicate-node) - joinTargetNode($index, $index-target, $index-node) - name: partial predicate removed right before secondary index when not dropping relation @@ -1980,10 +1979,10 @@ deprules - $index[Type] = '*scpb.SecondaryIndex' - joinOnIndexID($partial-predicate, $index, $table-id, $index-id) - relationIsNotBeingDropped(*scpb.SecondaryIndexPartial)($partial-predicate) - - $partial-predicate-target[TargetStatus] = ABSENT - - $partial-predicate-node[CurrentStatus] = ABSENT - - $index-target[TargetStatus] = TRANSIENT_ABSENT - - $index-node[CurrentStatus] = TRANSIENT_ABSENT + - $partial-predicate-target[TargetStatus] = TRANSIENT_ABSENT + - $partial-predicate-node[CurrentStatus] = TRANSIENT_ABSENT + - $index-target[TargetStatus] = ABSENT + - $index-node[CurrentStatus] = ABSENT - joinTargetNode($partial-predicate, $partial-predicate-target, $partial-predicate-node) - joinTargetNode($index, $index-target, $index-node) - name: partial predicate removed right before secondary index when not dropping relation @@ -1995,8 +1994,9 @@ deprules - $index[Type] = '*scpb.SecondaryIndex' - joinOnIndexID($partial-predicate, $index, $table-id, $index-id) - relationIsNotBeingDropped(*scpb.SecondaryIndexPartial)($partial-predicate) - - transient($partial-predicate-target, $index-target) - - $partial-predicate-node[CurrentStatus] = TRANSIENT_ABSENT + - $partial-predicate-target[TargetStatus] = ABSENT + - $partial-predicate-node[CurrentStatus] = ABSENT + - $index-target[TargetStatus] = TRANSIENT_ABSENT - $index-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($partial-predicate, $partial-predicate-target, $partial-predicate-node) - joinTargetNode($index, $index-target, $index-node) @@ -2016,22 +2016,6 @@ deprules - $new-index-node[CurrentStatus] = PUBLIC - joinTargetNode($old-index, $old-index-target, $old-index-node) - joinTargetNode($new-index, $new-index-target, $new-index-node) -- name: primary index swap - from: new-index-node - kind: SameStagePrecedence - to: old-index-node - query: - - $new-index[Type] = '*scpb.PrimaryIndex' - - $old-index[Type] = '*scpb.PrimaryIndex' - - joinOnDescID($new-index, $old-index, $table-id) - - $new-index[SourceIndexID] = $old-index-id - - $old-index[IndexID] = $old-index-id - - $new-index-target[TargetStatus] = ABSENT - - $new-index-node[CurrentStatus] = VALIDATED - - $old-index-target[TargetStatus] = PUBLIC - - $old-index-node[CurrentStatus] = PUBLIC - - joinTargetNode($new-index, $new-index-target, $new-index-node) - - joinTargetNode($old-index, $old-index-target, $old-index-node) - name: primary index swap from: old-index-node kind: SameStagePrecedence @@ -2048,6 +2032,22 @@ deprules - $new-index-node[CurrentStatus] = PUBLIC - joinTargetNode($old-index, $old-index-target, $old-index-node) - joinTargetNode($new-index, $new-index-target, $new-index-node) +- name: primary index swap + from: new-index-node + kind: SameStagePrecedence + to: old-index-node + query: + - $new-index[Type] = '*scpb.PrimaryIndex' + - $old-index[Type] = '*scpb.PrimaryIndex' + - joinOnDescID($new-index, $old-index, $table-id) + - $new-index[SourceIndexID] = $old-index-id + - $old-index[IndexID] = $old-index-id + - 
$new-index-target[TargetStatus] = ABSENT + - $new-index-node[CurrentStatus] = VALIDATED + - $old-index-target[TargetStatus] = PUBLIC + - $old-index-node[CurrentStatus] = PUBLIC + - joinTargetNode($new-index, $new-index-target, $new-index-node) + - joinTargetNode($old-index, $old-index-target, $old-index-node) - name: primary index with new columns should exist before secondary indexes from: primary-index-node kind: Precedence @@ -2086,10 +2086,10 @@ deprules - $index[Type] = '*scpb.IndexColumn' - $index-column[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnIndexID($index, $index-column, $table-id, $index-id) - - $index-target[TargetStatus] = TRANSIENT_ABSENT - - $index-node[CurrentStatus] = TRANSIENT_DELETE_ONLY - - $index-column-target[TargetStatus] = ABSENT - - $index-column-node[CurrentStatus] = ABSENT + - $index-target[TargetStatus] = ABSENT + - $index-node[CurrentStatus] = DELETE_ONLY + - $index-column-target[TargetStatus] = TRANSIENT_ABSENT + - $index-column-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($index-column, $index-column-target, $index-column-node) - name: remove columns from index right before removing index @@ -2100,9 +2100,10 @@ deprules - $index[Type] = '*scpb.IndexColumn' - $index-column[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnIndexID($index, $index-column, $table-id, $index-id) - - transient($index-target, $index-column-target) + - $index-target[TargetStatus] = TRANSIENT_ABSENT - $index-node[CurrentStatus] = TRANSIENT_DELETE_ONLY - - $index-column-node[CurrentStatus] = TRANSIENT_ABSENT + - $index-column-target[TargetStatus] = ABSENT + - $index-column-node[CurrentStatus] = ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($index-column, $index-column-target, $index-column-node) - name: remove columns from index right before removing index @@ -2113,9 +2114,9 @@ deprules - $index[Type] = '*scpb.IndexColumn' - $index-column[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnIndexID($index, $index-column, $table-id, $index-id) - - toAbsent($index-target, $index-column-target) - - $index-node[CurrentStatus] = DELETE_ONLY - - $index-column-node[CurrentStatus] = ABSENT + - transient($index-target, $index-column-target) + - $index-node[CurrentStatus] = TRANSIENT_DELETE_ONLY + - $index-column-node[CurrentStatus] = TRANSIENT_ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($index-column, $index-column-target, $index-column-node) - name: remove columns from index right before removing index @@ -2126,10 +2127,9 @@ deprules - $index[Type] = '*scpb.IndexColumn' - $index-column[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex'] - joinOnIndexID($index, $index-column, $table-id, $index-id) - - $index-target[TargetStatus] = ABSENT + - toAbsent($index-target, $index-column-target) - $index-node[CurrentStatus] = DELETE_ONLY - - $index-column-target[TargetStatus] = TRANSIENT_ABSENT - - $index-column-node[CurrentStatus] = TRANSIENT_ABSENT + - $index-column-node[CurrentStatus] = ABSENT - joinTargetNode($index, $index-target, $index-node) - joinTargetNode($index-column, $index-column-target, $index-column-node) - name: secondary indexes containing column as key reach write-only before column diff --git a/pkg/sql/scrub.go b/pkg/sql/scrub.go index dca97daeaa5f..7f133c475f5c 100644 --- a/pkg/sql/scrub.go +++ b/pkg/sql/scrub.go 
@@ -38,8 +38,8 @@ type scrubNode struct { // then bundled together and iterated through to pull results. // // NB: Other changes that need to be made to implement a new check are: -// 1) Add the option parsing in startScrubTable -// 2) Queue the checkOperation structs into scrubNode.checkQueue. +// 1. Add the option parsing in startScrubTable +// 2. Queue the checkOperation structs into scrubNode.checkQueue. // // TODO(joey): Eventually we will add the ability to repair check // failures. In that case, we can add a AttemptRepair function that is @@ -320,9 +320,12 @@ func colRefs(tableAlias string, columnNames []string) []string { // pairwiseOp joins each string on the left with the string on the right, with a // given operator in-between. For example -// pairwiseOp([]string{"a","b"}, []string{"x", "y"}, "=") +// +// pairwiseOp([]string{"a","b"}, []string{"x", "y"}, "=") +// // returns -// []string{"a = x", "b = y"}. +// +// []string{"a = x", "b = y"}. func pairwiseOp(left []string, right []string, op string) []string { if len(left) != len(right) { panic(errors.AssertionFailedf("slice length mismatch (%d vs %d)", len(left), len(right))) diff --git a/pkg/sql/scrub_index.go b/pkg/sql/scrub_index.go index bc55069bc8be..cf2175e810fd 100644 --- a/pkg/sql/scrub_index.go +++ b/pkg/sql/scrub_index.go @@ -26,10 +26,10 @@ import ( // indexCheckOperation implements the checkOperation interface. It is a // scrub check for a secondary index's integrity. This operation will // detect: -// 1) Missing index entries. When there is a secondary index entry -// expected, but is not found. -// 2) Dangling index references. When there is a secondary index entry -// that refers to a primary index key that cannot be found. +// 1. Missing index entries. When there is a secondary index entry +// expected, but is not found. +// 2. Dangling index references. When there is a secondary index entry +// that refers to a primary index key that cannot be found. type indexCheckOperation struct { tableName *tree.TableName tableDesc catalog.TableDescriptor @@ -234,49 +234,49 @@ func (o *indexCheckOperation) Close(ctx context.Context) { // // For example, given the following table schema: // -// CREATE TABLE table ( -// k INT, l INT, a INT, b INT, c INT, -// PRIMARY KEY (k, l), -// INDEX idx (a,b), -// ) +// CREATE TABLE table ( +// k INT, l INT, a INT, b INT, c INT, +// PRIMARY KEY (k, l), +// INDEX idx (a,b), +// ) // // The generated query to check the `v_idx` will be: // -// SELECT pri.k pri.l, pri.a, pri.b, -// sec.k, sec.l, sec.a, sec.b -// FROM -// (SELECT k, l, a, b FROM [tbl_id AS table_pri]@{FORCE_INDEX=[pri_idx_id]}) AS pri -// FULL OUTER JOIN -// (SELECT k, l, a, b FROM [tbl_id AS table_sec]@{FORCE_INDEX=[idx_id]} AS sec -// ON -// pri.k = sec.k AND -// pri.l = sec.l AND -// pri.a IS NOT DISTINCT FROM sec.a AND -// pri.b IS NOT DISTINCT FROM sec.b -// WHERE -// pri.k IS NULL OR sec.k IS NULL +// SELECT pri.k pri.l, pri.a, pri.b, +// sec.k, sec.l, sec.a, sec.b +// FROM +// (SELECT k, l, a, b FROM [tbl_id AS table_pri]@{FORCE_INDEX=[pri_idx_id]}) AS pri +// FULL OUTER JOIN +// (SELECT k, l, a, b FROM [tbl_id AS table_sec]@{FORCE_INDEX=[idx_id]} AS sec +// ON +// pri.k = sec.k AND +// pri.l = sec.l AND +// pri.a IS NOT DISTINCT FROM sec.a AND +// pri.b IS NOT DISTINCT FROM sec.b +// WHERE +// pri.k IS NULL OR sec.k IS NULL // // Explanation: -// 1) We scan both the primary index and the secondary index. // -// 2) We join them on equality on the PK columns and "IS NOT DISTINCT FROM" on -// the other index columns. 
"IS NOT DISTINCT FROM" is like equality except -// that NULL equals NULL; it is not needed for the PK columns because those -// can't be NULL. +// 1. We scan both the primary index and the secondary index. // -// Note: currently, only the PK columns will be used as join equality -// columns, but that is sufficient. +// 2. We join them on equality on the PK columns and "IS NOT DISTINCT FROM" on +// the other index columns. "IS NOT DISTINCT FROM" is like equality except +// that NULL equals NULL; it is not needed for the PK columns because those +// can't be NULL. // -// 3) We select the "outer" rows (those that had no match), effectively -// achieving a "double" anti-join. We use the PK columns which cannot be -// NULL except on these rows. +// Note: currently, only the PK columns will be used as join equality +// columns, but that is sufficient. // -// 4) The results are as follows: -// - if a PK column on the left is NULL, that means that the right-hand -// side row from the secondary index had no match in the primary index. -// - if a PK column on the right is NULL, that means that the left-hand -// side row from the primary key had no match in the secondary index. +// 3. We select the "outer" rows (those that had no match), effectively +// achieving a "double" anti-join. We use the PK columns which cannot be +// NULL except on these rows. // +// 4. The results are as follows: +// - if a PK column on the left is NULL, that means that the right-hand +// side row from the secondary index had no match in the primary index. +// - if a PK column on the right is NULL, that means that the left-hand +// side row from the primary key had no match in the secondary index. func createIndexCheckQuery( pkColumns []string, otherColumns []string, diff --git a/pkg/sql/scrub_test.go b/pkg/sql/scrub_test.go index eea537d9c44f..a724d0ac9ca6 100644 --- a/pkg/sql/scrub_test.go +++ b/pkg/sql/scrub_test.go @@ -36,7 +36,7 @@ import ( ) // TestScrubIndexMissingIndexEntry tests that -// `SCRUB TABLE ... INDEX ALL`` will find missing index entries. To test +// `SCRUB TABLE ... INDEX ALL“ will find missing index entries. To test // this, a row's underlying secondary index k/v is deleted using the KV // client. This causes a missing index entry error as the row is missing // the expected secondary index k/v. @@ -265,7 +265,7 @@ func addIndexEntryForDatums( } // TestScrubIndexDanglingIndexReference tests that -// `SCRUB TABLE ... INDEX`` will find dangling index references, which +// `SCRUB TABLE ... INDEX“ will find dangling index references, which // are index entries that have no corresponding primary k/v. To test // this an index entry is generated and inserted. This creates a // dangling index error as the corresponding primary k/v is not equal. diff --git a/pkg/sql/sem/builtins/aggregate_builtins.go b/pkg/sql/sem/builtins/aggregate_builtins.go index 1e19ba09c7ea..c45218bd6a93 100644 --- a/pkg/sql/sem/builtins/aggregate_builtins.go +++ b/pkg/sql/sem/builtins/aggregate_builtins.go @@ -88,9 +88,9 @@ var allMaxMinAggregateTypes = append( // an aggregate function call to NULL in the presence of a NULL argument may // not be correct. There are two cases where an aggregate function must handle // be called with null inputs: -// 1) the aggregate function does not skip NULLs (e.g., ARRAY_AGG); and -// 2) the aggregate function does not return NULL when it aggregates no rows -// (e.g., COUNT). +// 1. the aggregate function does not skip NULLs (e.g., ARRAY_AGG); and +// 2. 
the aggregate function does not return NULL when it aggregates no rows +// (e.g., COUNT). // // For use in other packages, see AllAggregateBuiltinNames and // GetBuiltinProperties(). @@ -1372,13 +1372,13 @@ type anyNotNullAggregate struct { // Note that NULL values do not affect the result of the aggregation; this is // important in a few different contexts: // -// - in distributed multi-stage aggregations, we can have a local stage with -// multiple (parallel) instances feeding into a final stage. If some of the -// instances see no rows, they emit a NULL into the final stage which needs -// to be ignored. +// - in distributed multi-stage aggregations, we can have a local stage with +// multiple (parallel) instances feeding into a final stage. If some of the +// instances see no rows, they emit a NULL into the final stage which needs +// to be ignored. // -// - for query optimization, when moving aggregations across left joins (which -// add NULL values). +// - for query optimization, when moving aggregations across left joins (which +// add NULL values). func NewAnyNotNullAggregate(evalCtx *eval.Context, _ tree.Datums) eval.AggregateFunc { return &anyNotNullAggregate{ singleDatumAggregateBase: makeSingleDatumAggregateBase(evalCtx), @@ -3688,7 +3688,8 @@ func (a *floatSumSqrDiffsAggregate) Count() int64 { } // The signature for the datums is: -// SQRDIFF (float), SUM (float), COUNT(int) +// +// SQRDIFF (float), SUM (float), COUNT(int) func (a *floatSumSqrDiffsAggregate) Add( _ context.Context, sqrDiffD tree.Datum, otherArgs ...tree.Datum, ) error { @@ -3944,8 +3945,9 @@ func newDecimalFinalVarianceAggregate( } // Add is part of the eval.AggregateFunc interface. -// Variance: VALUE(float) -// FinalVariance: SQRDIFF(float), SUM(float), COUNT(int) +// +// Variance: VALUE(float) +// FinalVariance: SQRDIFF(float), SUM(float), COUNT(int) func (a *floatVarianceAggregate) Add( ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum, ) error { @@ -3953,8 +3955,9 @@ func (a *floatVarianceAggregate) Add( } // Add is part of the eval.AggregateFunc interface. -// Variance: VALUE(int|decimal) -// FinalVariance: SQRDIFF(decimal), SUM(decimal), COUNT(int) +// +// Variance: VALUE(int|decimal) +// FinalVariance: SQRDIFF(decimal), SUM(decimal), COUNT(int) func (a *decimalVarianceAggregate) Add( ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum, ) error { @@ -4060,7 +4063,8 @@ func newDecimalFinalVarPopAggregate( } // Add is part of the eval.AggregateFunc interface. -// Population Variance: VALUE(float) +// +// Population Variance: VALUE(float) func (a *floatVarPopAggregate) Add( ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum, ) error { @@ -4068,7 +4072,8 @@ func (a *floatVarPopAggregate) Add( } // Add is part of the eval.AggregateFunc interface. -// Population Variance: VALUE(int|decimal) +// +// Population Variance: VALUE(int|decimal) func (a *decimalVarPopAggregate) Add( ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum, ) error { @@ -4214,8 +4219,9 @@ func newFloatFinalStdDevPopAggregate( // Add implements the eval.AggregateFunc interface. 
// The signature of the datums is: -// StdDev: VALUE(float) -// FinalStdDev: SQRDIFF(float), SUM(float), COUNT(int) +// +// StdDev: VALUE(float) +// FinalStdDev: SQRDIFF(float), SUM(float), COUNT(int) func (a *floatStdDevAggregate) Add( ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum, ) error { @@ -4224,8 +4230,9 @@ func (a *floatStdDevAggregate) Add( // Add is part of the eval.AggregateFunc interface. // The signature of the datums is: -// StdDev: VALUE(int|decimal) -// FinalStdDev: SQRDIFF(decimal), SUM(decimal), COUNT(int) +// +// StdDev: VALUE(int|decimal) +// FinalStdDev: SQRDIFF(decimal), SUM(decimal), COUNT(int) func (a *decimalStdDevAggregate) Add( ctx context.Context, firstArg tree.Datum, otherArgs ...tree.Datum, ) error { diff --git a/pkg/sql/sem/builtins/all_builtins_test.go b/pkg/sql/sem/builtins/all_builtins_test.go index b1610c57585b..ffad05e13c09 100644 --- a/pkg/sql/sem/builtins/all_builtins_test.go +++ b/pkg/sql/sem/builtins/all_builtins_test.go @@ -48,13 +48,15 @@ func TestOverloadsHaveVolatility(t *testing.T) { // overloads for Volatility. // Dump command below: // COPY (SELECT proname, args, rettype, provolatile, proleakproof FROM ( -// SELECT -// lhs.oid, proname, pg2.typname as rettype, ARRAY_AGG(pg1.typname) as args, provolatile, proleakproof -// FROM -// (select oid, proname, unnest(proargtypes) as typ, proargnames, prorettype, provolatile, proleakproof from pg_proc) AS lhs -// JOIN pg_type AS pg1 ON (lhs.typ = pg1.oid) -// JOIN pg_type AS pg2 ON (lhs.prorettype = pg2.oid) GROUP BY lhs.oid, proname, pg2.typname, provolatile, proleakproof) a -// ORDER BY proname, args +// +// SELECT +// lhs.oid, proname, pg2.typname as rettype, ARRAY_AGG(pg1.typname) as args, provolatile, proleakproof +// FROM +// (select oid, proname, unnest(proargtypes) as typ, proargnames, prorettype, provolatile, proleakproof from pg_proc) AS lhs +// JOIN pg_type AS pg1 ON (lhs.typ = pg1.oid) +// JOIN pg_type AS pg2 ON (lhs.prorettype = pg2.oid) GROUP BY lhs.oid, proname, pg2.typname, provolatile, proleakproof) a +// ORDER BY proname, args +// // ) TO '/tmp/pg_proc_provolatile_dump.csv' WITH CSV DELIMITER '|' HEADER; func TestOverloadsVolatilityMatchesPostgres(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/sem/builtins/pgcrypto_builtins.go b/pkg/sql/sem/builtins/pgcrypto_builtins.go index b05941ecc8a7..7fbe5d9e7a21 100644 --- a/pkg/sql/sem/builtins/pgcrypto_builtins.go +++ b/pkg/sql/sem/builtins/pgcrypto_builtins.go @@ -281,6 +281,7 @@ func cryptMD5(password, salt []byte) ([]byte, error) { } // bcryptLinked accesses private method bcrypt.bcrypt by using go:linkname. 
+// //go:linkname bcryptLinked golang.org/x/crypto/bcrypt.bcrypt func bcryptLinked(password []byte, cost int, salt []byte) ([]byte, error) diff --git a/pkg/sql/sem/builtins/window_builtins.go b/pkg/sql/sem/builtins/window_builtins.go index 3a4f6082f255..9c404a01ba08 100644 --- a/pkg/sql/sem/builtins/window_builtins.go +++ b/pkg/sql/sem/builtins/window_builtins.go @@ -511,7 +511,8 @@ func (w *denseRankWindow) Reset(context.Context) { func (w *denseRankWindow) Close(context.Context, *eval.Context) {} // percentRankWindow computes the relative rank of the current row using: -// (rank - 1) / (total rows - 1) +// +// (rank - 1) / (total rows - 1) type percentRankWindow struct { peerRes *tree.DFloat } @@ -545,7 +546,8 @@ func (w *percentRankWindow) Reset(context.Context) { func (w *percentRankWindow) Close(context.Context, *eval.Context) {} // cumulativeDistWindow computes the relative rank of the current row using: -// (number of rows preceding or peer with current row) / (total rows) +// +// (number of rows preceding or peer with current row) / (total rows) type cumulativeDistWindow struct { peerRes *tree.DFloat } diff --git a/pkg/sql/sem/cast/cast_test.go b/pkg/sql/sem/cast/cast_test.go index e0aed166c465..8076745e8de4 100644 --- a/pkg/sql/sem/cast/cast_test.go +++ b/pkg/sql/sem/cast/cast_test.go @@ -36,37 +36,36 @@ import ( // types that we do not support, and we ignore geospatial types because they are // an extension of Postgres and have no official OIDs. // -// \copy ( -// WITH ignored_types AS ( -// SELECT t::regtype::oid t -// FROM (VALUES -// ('geography'), -// ('geometry'), -// ('box2d'), -// ('box3d'), -// ('tstzmultirange'), -// ('int4multirange'), -// ('int8multirange'), -// ('tstzmultirange'), -// ('tsmultirange'), -// ('datemultirange'), -// ('nummultirange') -// ) AS types(t) -// ) -// SELECT -// c.castsource, -// c.casttarget, -// p.provolatile, -// p.proleakproof, -// c.castcontext, -// substring(version(), 'PostgreSQL (\d+\.\d+)') pg_version -// FROM pg_cast c JOIN pg_proc p ON (c.castfunc = p.oid) -// WHERE -// c.castsource NOT IN (SELECT t FROM ignored_types) -// AND c.casttarget NOT IN (SELECT t FROM ignored_types) -// ORDER BY 1, 2 -// ) TO pg_cast_dump.csv WITH CSV DELIMITER '|' HEADER; -// +// \copy ( +// WITH ignored_types AS ( +// SELECT t::regtype::oid t +// FROM (VALUES +// ('geography'), +// ('geometry'), +// ('box2d'), +// ('box3d'), +// ('tstzmultirange'), +// ('int4multirange'), +// ('int8multirange'), +// ('tstzmultirange'), +// ('tsmultirange'), +// ('datemultirange'), +// ('nummultirange') +// ) AS types(t) +// ) +// SELECT +// c.castsource, +// c.casttarget, +// p.provolatile, +// p.proleakproof, +// c.castcontext, +// substring(version(), 'PostgreSQL (\d+\.\d+)') pg_version +// FROM pg_cast c JOIN pg_proc p ON (c.castfunc = p.oid) +// WHERE +// c.castsource NOT IN (SELECT t FROM ignored_types) +// AND c.casttarget NOT IN (SELECT t FROM ignored_types) +// ORDER BY 1, 2 +// ) TO pg_cast_dump.csv WITH CSV DELIMITER '|' HEADER; func TestCastsMatchPostgres(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/sem/eval/comparison.go b/pkg/sql/sem/eval/comparison.go index 895bff8c53ad..ca98eb6f22f2 100644 --- a/pkg/sql/sem/eval/comparison.go +++ b/pkg/sql/sem/eval/comparison.go @@ -57,15 +57,19 @@ func evalComparison( // It returns the result of the ANY/SOME/ALL predicate. 
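The percent_rank and cume_dist formulas quoted in the window_builtins comments above are easy to check numerically. A minimal stand-alone sketch over plain ints follows; the function names are illustrative, and the real implementations operate on peer groups inside the window framer rather than on bare ranks.

package main

import "fmt"

// percentRank implements (rank - 1) / (total rows - 1).
func percentRank(rank, totalRows int) float64 {
	if totalRows <= 1 {
		return 0 // a single row has percent_rank 0 by convention
	}
	return float64(rank-1) / float64(totalRows-1)
}

// cumeDist implements
// (number of rows preceding or peer with current row) / (total rows).
func cumeDist(precedingOrPeers, totalRows int) float64 {
	return float64(precedingOrPeers) / float64(totalRows)
}

func main() {
	fmt.Println(percentRank(3, 5)) // 0.5
	fmt.Println(cumeDist(3, 5))    // 0.6
}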
// // A NULL result is returned if there exists a NULL element and: -// ANY/SOME: no comparisons evaluate to true -// ALL: no comparisons evaluate to false +// +// ANY/SOME: no comparisons evaluate to true +// ALL: no comparisons evaluate to false // // For example, given 1 < ANY (SELECT * FROM generate_series(1,3)) // (right is a DTuple), evalTupleCmp would be called with: -// evalDatumsCmp(ctx, LT, Any, CmpOp(LT, leftType, rightParamType), leftDatum, rightTuple.D). +// +// evalDatumsCmp(ctx, LT, Any, CmpOp(LT, leftType, rightParamType), leftDatum, rightTuple.D). +// // Similarly, given 1 < ANY (ARRAY[1, 2, 3]) (right is a DArray), // evalArrayCmp would be called with: -// evalDatumsCmp(ctx, LT, Any, CmpOp(LT, leftType, rightParamType), leftDatum, rightArray.Array). +// +// evalDatumsCmp(ctx, LT, Any, CmpOp(LT, leftType, rightParamType), leftDatum, rightArray.Array). func evalDatumsCmp( ctx *Context, op, subOp treecmp.ComparisonOperator, diff --git a/pkg/sql/sem/eval/match.go b/pkg/sql/sem/eval/match.go index f3bed446764f..97827cd759d0 100644 --- a/pkg/sql/sem/eval/match.go +++ b/pkg/sql/sem/eval/match.go @@ -317,15 +317,18 @@ func LikeEscape(pattern string) (string, error) { // For example, suppose we have escape token `\` (e.g. `B` is escaped in // `A\BC` and `\` is escaped in `A\\C`). // We need to convert -// `\` --> `` -// `\\` --> `\` +// +// `\` --> `` +// `\\` --> `\` +// // We cannot simply use strings.Replace for each conversion since the first -// conversion will incorrectly replace our escaped escape token `\\` with ``. +// conversion will incorrectly replace our escaped escape token `\\` with “. // Another example is if our escape token is `\\` (e.g. after // regexp.QuoteMeta). // We need to convert -// `\\` --> `` -// `\\\\` --> `\\` +// +// `\\` --> `` +// `\\\\` --> `\\` func unescapePattern( pattern, escapeToken string, emitEscapeCharacterLastError bool, ) (string, error) { @@ -369,11 +372,14 @@ func unescapePattern( // replaceUnescaped replaces all instances of oldStr that are not escaped (read: // preceded) with the specified unescape token with newStr. // For example, with an escape token of `\\` -// replaceUnescaped("TE\\__ST", "_", ".", `\\`) --> "TE\\_.ST" -// replaceUnescaped("TE\\%%ST", "%", ".*", `\\`) --> "TE\\%.*ST" +// +// replaceUnescaped("TE\\__ST", "_", ".", `\\`) --> "TE\\_.ST" +// replaceUnescaped("TE\\%%ST", "%", ".*", `\\`) --> "TE\\%.*ST" +// // If the preceding escape token is escaped, then oldStr will be replaced. // For example -// replaceUnescaped("TE\\\\_ST", "_", ".", `\\`) --> "TE\\\\.ST" +// +// replaceUnescaped("TE\\\\_ST", "_", ".", `\\`) --> "TE\\\\.ST" func replaceUnescaped(s, oldStr, newStr string, escapeToken string) string { // We count the number of occurrences of 'oldStr'. // This however can be an overestimate since the oldStr token could be @@ -447,20 +453,23 @@ OldLoop: // Replaces all custom escape characters in s with `\\` only when they are unescaped. (1) // E.g. original pattern after QuoteMeta after replaceCustomEscape with '@' as escape -// '@w@w' -> '@w@w' -> '\\w\\w' -// '@\@\' -> '@\\@\\' -> '\\\\\\\\' +// +// '@w@w' -> '@w@w' -> '\\w\\w' +// '@\@\' -> '@\\@\\' -> '\\\\\\\\' // // When an escape character is escaped, we replace it with its single occurrence. (2) // E.g. 
original pattern after QuoteMeta after replaceCustomEscape with '@' as escape -// '@@w@w' -> '@@w@w' -> '@w\\w' -// '@@@\' -> '@@@\\' -> '@\\\\' +// +// '@@w@w' -> '@@w@w' -> '@w\\w' +// '@@@\' -> '@@@\\' -> '@\\\\' // // At the same time, we do not want to confuse original backslashes (which // after QuoteMeta are '\\') with backslashes that replace our custom escape characters, // so we escape these original backslashes again by converting '\\' into '\\\\'. (3) // E.g. original pattern after QuoteMeta after replaceCustomEscape with '@' as escape -// '@\' -> '@\\' -> '\\\\\\' -// '@\@@@\' -> '@\\@@@\\' -> '\\\\\\@\\\\\\' +// +// '@\' -> '@\\' -> '\\\\\\' +// '@\@@@\' -> '@\\@@@\\' -> '\\\\\\@\\\\\\' // // Explanation of the last example: // 1. we replace '@' with '\\' since it's unescaped; @@ -891,7 +900,9 @@ func similarEscapeCustomChar(pattern string, escapeChar rune, isEscapeNonEmpty b } // caseInsensitive surrounds the transformed input string with -// (?i: ... ) +// +// (?i: ... ) +// // which uses a non-capturing set of parens to turn a case sensitive // regular expression pattern into a case insensitive regular // expression pattern. @@ -900,7 +911,9 @@ func caseInsensitive(pattern string) string { } // anchorPattern surrounds the transformed input string with -// ^(?s: ... )$ +// +// ^(?s: ... )$ +// // which requires some explanation. We need "^" and "$" to force // the pattern to match the entire input string as per SQL99 spec. // The "(?:" and ")" are a non-capturing set of parens; we have to have diff --git a/pkg/sql/sem/tree/alter_table.go b/pkg/sql/sem/tree/alter_table.go index 70278ee269d0..188005d42529 100644 --- a/pkg/sql/sem/tree/alter_table.go +++ b/pkg/sql/sem/tree/alter_table.go @@ -128,11 +128,11 @@ func (node *AlterTableAddColumn) Format(ctx *FmtCtx) { // stored in node.Cmds, into top-level commands to add those constraints. // Currently, this only applies to checks. For example, the ADD COLUMN in // -// ALTER TABLE t ADD COLUMN a INT CHECK (a < 1) +// ALTER TABLE t ADD COLUMN a INT CHECK (a < 1) // // is transformed into two commands, as in // -// ALTER TABLE t ADD COLUMN a INT, ADD CONSTRAINT check_a CHECK (a < 1) +// ALTER TABLE t ADD COLUMN a INT, ADD CONSTRAINT check_a CHECK (a < 1) // // (with an auto-generated name). // @@ -142,7 +142,7 @@ func (node *AlterTableAddColumn) Format(ctx *FmtCtx) { // constraints. For example, the following statement is accepted in // CockroachDB and Postgres, but not necessarily other SQL databases: // -// ALTER TABLE t ADD COLUMN a INT CHECK (a < b) +// ALTER TABLE t ADD COLUMN a INT CHECK (a < b) func (node *AlterTable) HoistAddColumnConstraints(onHoistedFKConstraint func()) { var normalizedCmds AlterTableCmds diff --git a/pkg/sql/sem/tree/comment_on_constraint.go b/pkg/sql/sem/tree/comment_on_constraint.go index d5a9f8673b27..56d8e5f354de 100644 --- a/pkg/sql/sem/tree/comment_on_constraint.go +++ b/pkg/sql/sem/tree/comment_on_constraint.go @@ -12,14 +12,14 @@ package tree import "github.com/cockroachdb/cockroach/pkg/sql/lexbase" -//CommentOnConstraint represents a COMMENT ON CONSTRAINT statement +// CommentOnConstraint represents a COMMENT ON CONSTRAINT statement type CommentOnConstraint struct { Constraint Name Table *UnresolvedObjectName Comment *string } -//Format implements the NodeFormatter interface. +// Format implements the NodeFormatter interface. 
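As a rough illustration of how a translated LIKE pattern ends up inside the (?i: ... ) and ^(?s: ... )$ wrappers described for caseInsensitive and anchorPattern above, here is a stand-alone sketch. likeToRegexp is a made-up helper, and it deliberately skips the ESCAPE handling that unescapePattern, replaceUnescaped and replaceCustomEscape take care of.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// likeToRegexp compiles a LIKE pattern (no ESCAPE clause) to a regexp:
// quote regexp metacharacters, translate the SQL wildcards, then wrap the
// result the way caseInsensitive and anchorPattern do.
func likeToRegexp(pattern string, caseInsensitive bool) (*regexp.Regexp, error) {
	re := regexp.QuoteMeta(pattern)
	// `%` matches any string, `_` matches any single character.
	re = strings.ReplaceAll(re, `%`, `.*`)
	re = strings.ReplaceAll(re, `_`, `.`)
	if caseInsensitive {
		// Non-capturing group that makes the pattern case-insensitive.
		re = `(?i:` + re + `)`
	}
	// Anchor with ^(?s: ... )$ so the whole input must match and `.` also
	// matches newlines.
	return regexp.Compile(`^(?s:` + re + `)$`)
}

func main() {
	re, err := likeToRegexp("he_lo%", true)
	if err != nil {
		panic(err)
	}
	fmt.Println(re.MatchString("HeLLo world")) // true
	fmt.Println(re.MatchString("help"))        // false
}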
func (n *CommentOnConstraint) Format(ctx *FmtCtx) { ctx.WriteString("COMMENT ON CONSTRAINT ") ctx.FormatNode(&n.Constraint) diff --git a/pkg/sql/sem/tree/constant.go b/pkg/sql/sem/tree/constant.go index c0caa9ba31bb..a7f7bec30b75 100644 --- a/pkg/sql/sem/tree/constant.go +++ b/pkg/sql/sem/tree/constant.go @@ -186,20 +186,22 @@ func (expr *NumVal) Format(ctx *FmtCtx) { } // canBeInt64 checks if it's possible for the value to become an int64: -// 1 = yes -// 1.0 = yes -// 1.1 = no -// 123...overflow...456 = no +// +// 1 = yes +// 1.0 = yes +// 1.1 = no +// 123...overflow...456 = no func (expr *NumVal) canBeInt64() bool { _, err := expr.AsInt64() return err == nil } // ShouldBeInt64 checks if the value naturally is an int64: -// 1 = yes -// 1.0 = no -// 1.1 = no -// 123...overflow...456 = no +// +// 1 = yes +// 1.0 = no +// 1.1 = no +// 123...overflow...456 = no func (expr *NumVal) ShouldBeInt64() bool { return expr.Kind() == constant.Int && expr.canBeInt64() } @@ -562,12 +564,13 @@ var ( // respective datum types could succeed. The hope was to eliminate impossibilities // and constrain the returned type sets as much as possible. Unfortunately, two issues // were found with this approach: -// - date and timestamp formats do not always imply a fixed-length valid input. For -// instance, timestamp formats that take fractional seconds can successfully parse -// inputs of varied length. -// - the set of date and timestamp formats are not disjoint, which means that ambiguity -// can not be eliminated when inferring the type of string literals that use these -// shared formats. +// - date and timestamp formats do not always imply a fixed-length valid input. For +// instance, timestamp formats that take fractional seconds can successfully parse +// inputs of varied length. +// - the set of date and timestamp formats are not disjoint, which means that ambiguity +// can not be eliminated when inferring the type of string literals that use these +// shared formats. +// // While these limitations still permitted improved type inference in many cases, they // resulted in behavior that was ultimately incomplete, resulted in unpredictable levels // of inference, and occasionally failed to eliminate ambiguity. Further heuristics could diff --git a/pkg/sql/sem/tree/create.go b/pkg/sql/sem/tree/create.go index 43002d2db642..42779cefd8fb 100644 --- a/pkg/sql/sem/tree/create.go +++ b/pkg/sql/sem/tree/create.go @@ -1581,19 +1581,19 @@ func (node *CreateTable) FormatBody(ctx *FmtCtx) { // inline with their columns and makes them table-level constraints, stored in // n.Defs. For example, the foreign key constraint in // -// CREATE TABLE foo (a INT REFERENCES bar(a)) +// CREATE TABLE foo (a INT REFERENCES bar(a)) // // gets pulled into a top-level constraint like: // -// CREATE TABLE foo (a INT, FOREIGN KEY (a) REFERENCES bar(a)) +// CREATE TABLE foo (a INT, FOREIGN KEY (a) REFERENCES bar(a)) // // Similarly, the CHECK constraint in // -// CREATE TABLE foo (a INT CHECK (a < 1), b INT) +// CREATE TABLE foo (a INT CHECK (a < 1), b INT) // // gets pulled into a top-level constraint like: // -// CREATE TABLE foo (a INT, b INT, CHECK (a < 1)) +// CREATE TABLE foo (a INT, b INT, CHECK (a < 1)) // // Note that some SQL databases require that a constraint attached to a column // to refer only to the column it is attached to. We follow Postgres' behavior, @@ -1601,10 +1601,9 @@ func (node *CreateTable) FormatBody(ctx *FmtCtx) { // constraints. 
For example, the following table definition is accepted in // CockroachDB and Postgres, but not necessarily other SQL databases: // -// CREATE TABLE foo (a INT CHECK (a < b), b INT) +// CREATE TABLE foo (a INT CHECK (a < b), b INT) // // Unique constraints are not hoisted. -// func (node *CreateTable) HoistConstraints() { for _, d := range node.Defs { if col, ok := d.(*ColumnTableDef); ok { diff --git a/pkg/sql/sem/tree/datum.go b/pkg/sql/sem/tree/datum.go index eb7bd7222528..2cb20e605141 100644 --- a/pkg/sql/sem/tree/datum.go +++ b/pkg/sql/sem/tree/datum.go @@ -4209,9 +4209,10 @@ func (d *DTuple) Size() uintptr { // ContainsNull returns true if the tuple contains NULL, possibly nested inside // other tuples. For example, all the following tuples contain NULL: -// (1, 2, NULL) -// ((1, 1), (2, NULL)) -// (((1, 1), (2, 2)), ((3, 3), (4, NULL))) +// +// (1, 2, NULL) +// ((1, 1), (2, NULL)) +// (((1, 1), (2, 2)), ((3, 3), (4, NULL))) func (d *DTuple) ContainsNull() bool { for _, r := range d.D { if r == DNull { @@ -5126,14 +5127,13 @@ func (d *DOid) Min(ctx CompareContext) (Datum, bool) { // // Instead, DOidWrapper allows a standard Datum to be wrapped with a new Oid. // This approach provides two major advantages: -// - performance of the existing Datum types are not affected because they -// do not need to have custom oid.Oids added to their structure. -// - the introduction of new Datum aliases is straightforward and does not require -// additions to typing rules or type-dependent evaluation behavior. +// - performance of the existing Datum types are not affected because they +// do not need to have custom oid.Oids added to their structure. +// - the introduction of new Datum aliases is straightforward and does not require +// additions to typing rules or type-dependent evaluation behavior. // // Types that currently benefit from DOidWrapper are: // - DName => DOidWrapper(*DString, oid.T_name) -// type DOidWrapper struct { Wrapped Datum Oid oid.Oid @@ -5514,8 +5514,9 @@ var baseDatumTypeSizes = map[types.Family]struct { // MaxDistinctCount returns the maximum number of distinct values between the // given datums (inclusive). This is possible if: -// a. the types of the datums are equivalent and countable, or -// b. the datums have the same value (in which case the distinct count is 1). +// +// a. the types of the datums are equivalent and countable, or +// b. the datums have the same value (in which case the distinct count is 1). // // If neither of these conditions hold, MaxDistinctCount returns ok=false. // Additionally, it must be the case that first <= last, otherwise diff --git a/pkg/sql/sem/tree/evalgen/eval_gen.go b/pkg/sql/sem/tree/evalgen/eval_gen.go index 748f59a856fc..77f278379765 100644 --- a/pkg/sql/sem/tree/evalgen/eval_gen.go +++ b/pkg/sql/sem/tree/evalgen/eval_gen.go @@ -13,9 +13,8 @@ // // Generated files can be regenerated with either of the follow commands: // -// ./dev generate go -// go generate ./pkg/sql/sem/tree -// +// ./dev generate go +// go generate ./pkg/sql/sem/tree package main import ( diff --git a/pkg/sql/sem/tree/expr.go b/pkg/sql/sem/tree/expr.go index 9ca2d5716723..edf8a2c96354 100644 --- a/pkg/sql/sem/tree/expr.go +++ b/pkg/sql/sem/tree/expr.go @@ -346,8 +346,10 @@ func (node *ParenExpr) TypedInnerExpr() TypedExpr { // StripParens strips any parentheses surrounding an expression and // returns the inner expression. 
For instance: -// 1 -> 1 -// (1) -> 1 +// +// 1 -> 1 +// (1) -> 1 +// // ((1)) -> 1 func StripParens(expr Expr) Expr { if p, ok := expr.(*ParenExpr); ok { diff --git a/pkg/sql/sem/tree/hide_constants.go b/pkg/sql/sem/tree/hide_constants.go index e1e0a09c1f4b..88c43d9ebfc5 100644 --- a/pkg/sql/sem/tree/hide_constants.go +++ b/pkg/sql/sem/tree/hide_constants.go @@ -93,12 +93,13 @@ func (node *Exprs) formatHideConstants(ctx *FmtCtx) { // placeholders and longer than 1 element as a tuple of its first // two elements, scrubbed. // e.g. (1) -> (_) -// (1, 2) -> (_, _) -// (1, 2, 3) -> (_, _, __more1_10__) -// ROW() -> ROW() -// ROW($1, $2, $3) -> ROW($1, $2, __more1_10__) -// (1+2, 2+3, 3+4) -> (_ + _, _ + _, _ + _) -// (1+2, b, c) -> (_ + _, b, c) +// +// (1, 2) -> (_, _) +// (1, 2, 3) -> (_, _, __more1_10__) +// ROW() -> ROW() +// ROW($1, $2, $3) -> ROW($1, $2, __more1_10__) +// (1+2, 2+3, 3+4) -> (_ + _, _ + _, _ + _) +// (1+2, b, c) -> (_ + _, b, c) func (node *Tuple) formatHideConstants(ctx *FmtCtx) { if len(node.Exprs) < 2 { node.Format(ctx) @@ -135,9 +136,10 @@ func (node *Tuple) formatHideConstants(ctx *FmtCtx) { // literals or placeholders and longer than 1 element as an array // expression of its first two elements, scrubbed. // e.g. array[1] -> array[_] -// array[1, 2] -> array[_, _] -// array[1, 2, 3] -> array[_, _, __more1_10__] -// array[1+2, 2+3, 3+4] -> array[_ + _, _ + _, _ + _] +// +// array[1, 2] -> array[_, _] +// array[1, 2, 3] -> array[_, _, __more1_10__] +// array[1+2, 2+3, 3+4] -> array[_ + _, _ + _, _ + _] func (node *Array) formatHideConstants(ctx *FmtCtx) { if len(node.Exprs) < 2 { node.Format(ctx) diff --git a/pkg/sql/sem/tree/interval.go b/pkg/sql/sem/tree/interval.go index 4831930137aa..e5a2cabdf827 100644 --- a/pkg/sql/sem/tree/interval.go +++ b/pkg/sql/sem/tree/interval.go @@ -183,8 +183,8 @@ func newInvalidSQLDurationError(s string) error { // Parses a SQL standard interval string. // See the following links for examples: -// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES -// - http://www.ibm.com/support/knowledgecenter/SSGU8G_12.1.0/com.ibm.esqlc.doc/ids_esqlc_0190.htm +// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES +// - http://www.ibm.com/support/knowledgecenter/SSGU8G_12.1.0/com.ibm.esqlc.doc/ids_esqlc_0190.htm func sqlStdToDuration(s string, itm types.IntervalTypeMetadata) (duration.Duration, error) { var d duration.Duration parts := strings.Fields(s) @@ -409,9 +409,9 @@ func sqlStdToDuration(s string, itm types.IntervalTypeMetadata) (duration.Durati // Parses an ISO8601 (with designators) string. 
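For reference, the designator form that iso8601ToDuration accepts looks like P1Y2M3DT4H5M6S. The following regexp-based sketch only extracts the components; it is deliberately simplified (no weeks, fractions, or negative values) and is not the parser used in interval.go.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// isoDuration is a simplified ISO-8601 "designators" duration.
type isoDuration struct {
	years, months, days, hours, minutes, seconds int
}

var isoRE = regexp.MustCompile(
	`^P(?:(\d+)Y)?(?:(\d+)M)?(?:(\d+)D)?(?:T(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?)?$`)

func parseISODuration(s string) (isoDuration, error) {
	m := isoRE.FindStringSubmatch(s)
	if m == nil {
		return isoDuration{}, fmt.Errorf("not an ISO-8601 duration: %q", s)
	}
	atoi := func(v string) int {
		if v == "" {
			return 0
		}
		n, _ := strconv.Atoi(v)
		return n
	}
	return isoDuration{
		years:   atoi(m[1]),
		months:  atoi(m[2]),
		days:    atoi(m[3]),
		hours:   atoi(m[4]),
		minutes: atoi(m[5]),
		seconds: atoi(m[6]),
	}, nil
}

func main() {
	fmt.Println(parseISODuration("P1Y2M3DT4H5M6S")) // {1 2 3 4 5 6} <nil>
}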
// See the following links for examples: -// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES -// - https://en.wikipedia.org/wiki/ISO_8601#Time_intervals -// - https://en.wikipedia.org/wiki/ISO_8601#Durations +// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES +// - https://en.wikipedia.org/wiki/ISO_8601#Time_intervals +// - https://en.wikipedia.org/wiki/ISO_8601#Durations func iso8601ToDuration(s string) (duration.Duration, error) { var d duration.Duration if len(s) == 0 || s[0] != 'P' { diff --git a/pkg/sql/sem/tree/operators_test.go b/pkg/sql/sem/tree/operators_test.go index 43ab1a2cadef..6abca2cf137d 100644 --- a/pkg/sql/sem/tree/operators_test.go +++ b/pkg/sql/sem/tree/operators_test.go @@ -33,9 +33,11 @@ import ( // // Dump command below: // COPY ( -// SELECT o.oprname, o.oprleft, o.oprright, o.oprresult, p.provolatile, p.proleakproof -// FROM pg_operator AS o JOIN pg_proc AS p ON (o.oprcode = p.oid) -// ORDER BY o.oprname, o.oprleft, o.oprright, o.oprresult +// +// SELECT o.oprname, o.oprleft, o.oprright, o.oprresult, p.provolatile, p.proleakproof +// FROM pg_operator AS o JOIN pg_proc AS p ON (o.oprcode = p.oid) +// ORDER BY o.oprname, o.oprleft, o.oprright, o.oprresult +// // ) TO STDOUT WITH CSV DELIMITER '|' HEADER; func TestOperatorVolatilityMatchesPostgres(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/sem/tree/overload.go b/pkg/sql/sem/tree/overload.go index 2b94b7e00a34..dd897addce8b 100644 --- a/pkg/sql/sem/tree/overload.go +++ b/pkg/sql/sem/tree/overload.go @@ -574,9 +574,11 @@ type typeCheckOverloadState struct { // expression parameters, along with an optional desired return type. It returns the expression // parameters after being type checked, along with a slice of candidate overloadImpls. The // slice may have length: -// 0: overload resolution failed because no compatible overloads were found -// 1: overload resolution succeeded -// 2+: overload resolution failed because of ambiguity +// +// 0: overload resolution failed because no compatible overloads were found +// 1: overload resolution succeeded +// 2+: overload resolution failed because of ambiguity +// // The inBinOp parameter denotes whether this type check is occurring within a binary operator, // in which case we may need to make a guess that the two parameters are of the same type if one // of them is NULL. diff --git a/pkg/sql/sem/tree/pretty.go b/pkg/sql/sem/tree/pretty.go index 412c0a0f1253..b3c36327167c 100644 --- a/pkg/sql/sem/tree/pretty.go +++ b/pkg/sql/sem/tree/pretty.go @@ -414,25 +414,24 @@ func (node *Exprs) doc(p *PrettyCfg) pretty.Doc { // peelBinaryOperand conditionally (p.Simplify) removes the // parentheses around an expression. The parentheses are always // removed in the following conditions: -// - if the operand is a unary operator (these are always -// of higher precedence): "(-a) * b" -> "-a * b" -// - if the operand is a binary operator and its precedence -// is guaranteed to be higher: "(a * b) + c" -> "a * b + c" +// - if the operand is a unary operator (these are always +// of higher precedence): "(-a) * b" -> "-a * b" +// - if the operand is a binary operator and its precedence +// is guaranteed to be higher: "(a * b) + c" -> "a * b + c" // // Additionally, iff sameLevel is set, then parentheses are removed // around any binary operator that has the same precedence level as // the parent. 
// sameLevel can be set: // -// - for the left operand of all binary expressions, because -// (in pg SQL) all binary expressions are left-associative. -// This rewrites e.g. "(a + b) - c" -> "a + b - c" -// and "(a - b) + c" -> "a - b + c" -// - for the right operand when the parent operator is known -// to be fully associative, e.g. -// "a + (b - c)" -> "a + b - c" because "+" is fully assoc, -// but "a - (b + c)" cannot be simplified because "-" is not fully associative. -// +// - for the left operand of all binary expressions, because +// (in pg SQL) all binary expressions are left-associative. +// This rewrites e.g. "(a + b) - c" -> "a + b - c" +// and "(a - b) + c" -> "a - b + c" +// - for the right operand when the parent operator is known +// to be fully associative, e.g. +// "a + (b - c)" -> "a + b - c" because "+" is fully assoc, +// but "a - (b + c)" cannot be simplified because "-" is not fully associative. func (p *PrettyCfg) peelBinaryOperand(e Expr, sameLevel bool, parenPrio int) Expr { if !p.Simplify { return e diff --git a/pkg/sql/sem/tree/select.go b/pkg/sql/sem/tree/select.go index 3743b8423ec3..e3550cdd0792 100644 --- a/pkg/sql/sem/tree/select.go +++ b/pkg/sql/sem/tree/select.go @@ -308,14 +308,15 @@ type IndexID = catid.IndexID // IndexFlags represents "@" or "@{param[,param]}" where // param is one of: -// - FORCE_INDEX= -// - ASC / DESC -// - NO_INDEX_JOIN -// - NO_ZIGZAG_JOIN -// - NO_FULL_SCAN -// - IGNORE_FOREIGN_KEYS -// - FORCE_ZIGZAG -// - FORCE_ZIGZAG=* +// - FORCE_INDEX= +// - ASC / DESC +// - NO_INDEX_JOIN +// - NO_ZIGZAG_JOIN +// - NO_FULL_SCAN +// - IGNORE_FOREIGN_KEYS +// - FORCE_ZIGZAG +// - FORCE_ZIGZAG=* +// // It is used optionally after a table name in SELECT statements. type IndexFlags struct { Index UnrestrictedName @@ -423,8 +424,8 @@ func (ih *IndexFlags) CombineWith(other *IndexFlags) error { } // Check verifies if the flags are valid: -// - ascending/descending is not specified without an index; -// - no_index_join isn't specified with an index. +// - ascending/descending is not specified without an index; +// - no_index_join isn't specified with an index. func (ih *IndexFlags) Check() error { if ih.NoIndexJoin && ih.ForceIndex() { return errors.New("FORCE_INDEX cannot be specified in conjunction with NO_INDEX_JOIN") diff --git a/pkg/sql/sem/tree/table_name.go b/pkg/sql/sem/tree/table_name.go index d6b017a4ff98..bbcc40af19c7 100644 --- a/pkg/sql/sem/tree/table_name.go +++ b/pkg/sql/sem/tree/table_name.go @@ -143,18 +143,18 @@ func (ts *TableNames) String() string { return AsString(ts) } // TableIndexName refers to a table index. There are a few cases: // -// - if both the table name and the index name are set, refers to a specific -// index in a specific table. +// - if both the table name and the index name are set, refers to a specific +// index in a specific table. // -// - if the table name is set and index name is empty, refers to the primary -// index of that table. +// - if the table name is set and index name is empty, refers to the primary +// index of that table. // -// - if the table name is empty and the index name is set, refers to an index -// of that name among all tables within a catalog/schema; if there is a -// duplicate name, that will result in an error. Note that it is possible to -// specify the schema or catalog without specifying a table name; in this -// case, Table.ObjectNamePrefix has the fields set but Table.ObjectName is -// empty. 
+// - if the table name is empty and the index name is set, refers to an index +// of that name among all tables within a catalog/schema; if there is a +// duplicate name, that will result in an error. Note that it is possible to +// specify the schema or catalog without specifying a table name; in this +// case, Table.ObjectNamePrefix has the fields set but Table.ObjectName is +// empty. type TableIndexName struct { Table TableName Index UnrestrictedName diff --git a/pkg/sql/sem/tree/type_check.go b/pkg/sql/sem/tree/type_check.go index 1f410004e958..f76ff7416820 100644 --- a/pkg/sql/sem/tree/type_check.go +++ b/pkg/sql/sem/tree/type_check.go @@ -2861,17 +2861,17 @@ func (*placeholderAnnotationVisitor) VisitPost(expr Expr) Expr { return expr } // provided Statement, annotating all placeholders with a type in either of the following // situations: // -// - the placeholder is the subject of an explicit type annotation in at least one -// of its occurrences. If it is subject to multiple explicit type annotations -// where the types are not all in agreement, or if the placeholder already has -// a type hint in the placeholder map which conflicts with the explicit type -// annotation type, an error will be thrown. +// - the placeholder is the subject of an explicit type annotation in at least one +// of its occurrences. If it is subject to multiple explicit type annotations +// where the types are not all in agreement, or if the placeholder already has +// a type hint in the placeholder map which conflicts with the explicit type +// annotation type, an error will be thrown. // -// - the placeholder is the subject to a cast of the same type in all -// occurrences of the placeholder. If the placeholder is subject to casts of -// multiple types, or if it has occurrences without a cast, no error will be -// thrown but the type will not be inferred. If the placeholder already has a -// type hint, that type will be kept regardless of any casts. +// - the placeholder is the subject to a cast of the same type in all +// occurrences of the placeholder. If the placeholder is subject to casts of +// multiple types, or if it has occurrences without a cast, no error will be +// thrown but the type will not be inferred. If the placeholder already has a +// type hint, that type will be kept regardless of any casts. // // See docs/RFCS/20160203_typing.md for more details on placeholder typing (in // particular section "First pass: placeholder annotations"). diff --git a/pkg/sql/sem/tree/typing_test.go b/pkg/sql/sem/tree/typing_test.go index f785fac92c1a..049a92771634 100644 --- a/pkg/sql/sem/tree/typing_test.go +++ b/pkg/sql/sem/tree/typing_test.go @@ -20,10 +20,10 @@ import ( // TestTypingBinaryAssumptions ensures that binary overloads conform to certain // assumptions we're making in the type inference code: -// 1. The return type can be inferred from the operator type and the data -// types of its operands. -// 2. When of the operands is null, and if CalledOnNullInput is true, then the -// return type can be inferred from just the non-null operand. +// 1. The return type can be inferred from the operator type and the data +// types of its operands. +// 2. When of the operands is null, and if CalledOnNullInput is true, then the +// return type can be inferred from just the non-null operand. 
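Assumption (1) above, that the operator together with the operand types determines the return type, amounts to a table keyed on that triple which is never allowed to disagree with itself. A toy sketch with made-up operator and type names, not the actual overload machinery:

package main

import "fmt"

// binOpKey identifies a binary overload by operator and operand types.
type binOpKey struct {
	op, left, right string
}

// registerReturnType records the return type for a key and reports an error
// if a different return type was already registered, i.e. if the return type
// could not be inferred from the key alone.
func registerReturnType(m map[binOpKey]string, k binOpKey, ret string) error {
	if prev, ok := m[k]; ok && prev != ret {
		return fmt.Errorf("%v already returns %s; cannot also return %s", k, prev, ret)
	}
	m[k] = ret
	return nil
}

func main() {
	m := map[binOpKey]string{}
	fmt.Println(registerReturnType(m, binOpKey{"+", "int", "int"}, "int"))         // <nil>
	fmt.Println(registerReturnType(m, binOpKey{"+", "decimal", "int"}, "decimal")) // <nil>
	fmt.Println(registerReturnType(m, binOpKey{"+", "int", "int"}, "decimal"))     // error
}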
func TestTypingBinaryAssumptions(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/sem/tree/var_name.go b/pkg/sql/sem/tree/var_name.go index 735a57fc90ac..5bcb227d4f7f 100644 --- a/pkg/sql/sem/tree/var_name.go +++ b/pkg/sql/sem/tree/var_name.go @@ -19,9 +19,9 @@ import ( // // Immediately after parsing, the following types can occur: // -// - UnqualifiedStar: a naked star as argument to a function, e.g. count(*), -// or at the top level of a SELECT clause. -// See also uses of StarExpr() and StarSelectExpr() in the grammar. +// - UnqualifiedStar: a naked star as argument to a function, e.g. count(*), +// or at the top level of a SELECT clause. +// See also uses of StarExpr() and StarSelectExpr() in the grammar. // // - UnresolvedName: other names of the form `a.b....e` or `a.b...e.*`. // diff --git a/pkg/sql/sequence.go b/pkg/sql/sequence.go index 0679ba61332c..915ebfe8685f 100644 --- a/pkg/sql/sequence.go +++ b/pkg/sql/sequence.go @@ -792,6 +792,7 @@ func (p *planner) dropSequencesOwnedByCol( // - removes the reference from the column descriptor to the sequence descriptor. // - removes the reference from the sequence descriptor to the column descriptor. // - writes the sequence descriptor and notifies a schema change. +// // The column descriptor is mutated but not saved to persistent storage; the caller must save it. func (p *planner) removeSequenceDependencies( ctx context.Context, tableDesc *tabledesc.Mutable, col catalog.Column, diff --git a/pkg/sql/set_cluster_setting.go b/pkg/sql/set_cluster_setting.go index 17555e451473..88e8fb032d27 100644 --- a/pkg/sql/set_cluster_setting.go +++ b/pkg/sql/set_cluster_setting.go @@ -628,7 +628,8 @@ func (n *setClusterSettingNode) Close(_ context.Context) {} // // Args: // prev: Only specified if the setting is a StateMachineSetting. Represents the -// current value of the setting, read from the system.settings table. +// +// current value of the setting, read from the system.settings table. func toSettingString( ctx context.Context, st *cluster.Settings, name string, s settings.Setting, d tree.Datum, ) (string, error) { diff --git a/pkg/sql/set_var.go b/pkg/sql/set_var.go index 8a1de41ca01b..e8324b254e25 100644 --- a/pkg/sql/set_var.go +++ b/pkg/sql/set_var.go @@ -47,7 +47,8 @@ type resetAllNode struct{} // SetVar sets session variables. // Privileges: None. -// Notes: postgres/mysql do not require privileges for session variables (some exceptions). +// +// Notes: postgres/mysql do not require privileges for session variables (some exceptions). 
func (p *planner) SetVar(ctx context.Context, n *tree.SetVar) (planNode, error) { if n.ResetAll { return &resetAllNode{}, nil diff --git a/pkg/sql/show_fingerprints.go b/pkg/sql/show_fingerprints.go index 888eb4f29faf..8f284fb3c3e0 100644 --- a/pkg/sql/show_fingerprints.go +++ b/pkg/sql/show_fingerprints.go @@ -48,7 +48,8 @@ type showFingerprintsNode struct { // // To extract the fingerprints at some point in the past, the following // query can be used: -// SELECT * FROM [SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE foo] AS OF SYSTEM TIME xxx +// +// SELECT * FROM [SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE foo] AS OF SYSTEM TIME xxx func (p *planner) ShowFingerprints( ctx context.Context, n *tree.ShowFingerprints, ) (planNode, error) { diff --git a/pkg/sql/show_trace.go b/pkg/sql/show_trace.go index 3384a1f05b74..eedab704d8b2 100644 --- a/pkg/sql/show_trace.go +++ b/pkg/sql/show_trace.go @@ -60,8 +60,9 @@ func (p *planner) ShowTrace(ctx context.Context, n *tree.ShowTraceForSession) (p // // Args: // kvTracingEnabled: If set, the trace will also include "KV trace" messages - -// verbose messages around the interaction of SQL with KV. Some of the -// messages are per-row. +// +// verbose messages around the interaction of SQL with KV. Some of the +// messages are per-row. func (p *planner) makeShowTraceNode(compact bool, kvTracingEnabled bool) *showTraceNode { n := &showTraceNode{ kvTracingEnabled: kvTracingEnabled, diff --git a/pkg/sql/span/span_builder.go b/pkg/sql/span/span_builder.go index b03918922118..2887e21cd373 100644 --- a/pkg/sql/span/span_builder.go +++ b/pkg/sql/span/span_builder.go @@ -214,7 +214,8 @@ func (s *Builder) SpanToPointSpan(span roachpb.Span, family descpb.FamilyID) roa // A span.Splitter can be used to generate more specific family spans. // // TODO (rohany): In future work, there should be a single API to generate spans -// from constraints, datums and encdatums. +// +// from constraints, datums and encdatums. func (s *Builder) SpansFromConstraint( c *constraint.Constraint, splitter Splitter, ) (roachpb.Spans, error) { diff --git a/pkg/sql/sqlstats/cluster_settings.go b/pkg/sql/sqlstats/cluster_settings.go index cd5920561fa7..74e9d2bb6be3 100644 --- a/pkg/sql/sqlstats/cluster_settings.go +++ b/pkg/sql/sqlstats/cluster_settings.go @@ -133,12 +133,12 @@ var MaxMemReportedSQLStatsTxnFingerprints = settings.RegisterIntSetting( // This results in 4 statement fingerprints and 1 txn fingerprint. // Let's suppose currently our statement fingerprint limit is 6. // If we are to execute the same statement again: -// * BEGIN; <- this increments current statement fingerprint count to 5 -// since we hold statement stats for explicit transaction in a -// temporary container before we can perform the upsert. -// * SELECT 1; <- this increments the count to 6 -// * SELECT 1, 1; <- ERR: this causes the count to exceed our stmt fingerprint -// limit before we can perform the upsert. +// - BEGIN; <- this increments current statement fingerprint count to 5 +// since we hold statement stats for explicit transaction in a +// temporary container before we can perform the upsert. +// - SELECT 1; <- this increments the count to 6 +// - SELECT 1, 1; <- ERR: this causes the count to exceed our stmt fingerprint +// limit before we can perform the upsert. // // The total amount of memory consumed will still be constrained by the // top-level memory monitor created for SQL Stats. 
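The fingerprint-limit walkthrough in sqlstats/cluster_settings.go above reduces to: an already-seen fingerprint only updates existing stats, a new fingerprint takes a slot, and a new fingerprint past the limit is rejected. A toy limiter along those lines (the type, method and error names are illustrative, not the sqlstats API):

package main

import (
	"errors"
	"fmt"
)

var errFingerprintLimit = errors.New("statement fingerprint limit exceeded")

// fingerprintLimiter tracks distinct statement fingerprints against a limit.
type fingerprintLimiter struct {
	limit int
	seen  map[string]struct{}
}

// record counts a fingerprint, failing if a new one would exceed the limit.
func (l *fingerprintLimiter) record(fingerprint string) error {
	if _, ok := l.seen[fingerprint]; ok {
		return nil // existing stats can always be updated
	}
	if len(l.seen) >= l.limit {
		return errFingerprintLimit
	}
	l.seen[fingerprint] = struct{}{}
	return nil
}

func main() {
	// Four fingerprints already exist; the limit is 6.
	l := &fingerprintLimiter{limit: 6, seen: map[string]struct{}{
		"BEGIN": {}, "SELECT _": {}, "SELECT _, _": {}, "COMMIT": {},
	}}
	// Re-executing the explicit transaction stages its statements in a
	// temporary container, so they show up here as new fingerprints.
	fmt.Println(l.record("txn: BEGIN"))       // <nil>, count is now 5
	fmt.Println(l.record("txn: SELECT _"))    // <nil>, count is now 6
	fmt.Println(l.record("txn: SELECT _, _")) // statement fingerprint limit exceeded
}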
diff --git a/pkg/sql/sqlstats/insights/registry.go b/pkg/sql/sqlstats/insights/registry.go index 97d76a96e3d9..f4cdf6235779 100644 --- a/pkg/sql/sqlstats/insights/registry.go +++ b/pkg/sql/sqlstats/insights/registry.go @@ -102,10 +102,11 @@ func (r *lockingRegistry) IterateInsights( } // TODO(todd): -// Once we can handle sufficient throughput to live on the hot -// execution path in #81021, we can probably get rid of this external -// concept of "enabled" and let the detectors just decide for themselves -// internally. +// +// Once we can handle sufficient throughput to live on the hot +// execution path in #81021, we can probably get rid of this external +// concept of "enabled" and let the detectors just decide for themselves +// internally. func (r *lockingRegistry) enabled() bool { return r.detector.enabled() } diff --git a/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go b/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go index 084163531d15..cd1a53c85705 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go +++ b/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go @@ -68,7 +68,8 @@ var SQLStatsFlushEnabled = settings.RegisterBoolSetting( // attempts to flush SQL Stats. // // [(1 - SQLStatsFlushJitter) * SQLStatsFlushInterval), -// (1 + SQLStatsFlushJitter) * SQLStatsFlushInterval)] +// +// (1 + SQLStatsFlushJitter) * SQLStatsFlushInterval)] var SQLStatsFlushJitter = settings.RegisterFloatSetting( settings.TenantWritable, "sql.stats.flush.jitter", diff --git a/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go b/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go index 01ef3008c9fc..65e26cc8b00b 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go +++ b/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go @@ -273,7 +273,8 @@ type cleanupOperations struct { } // N.B. when changing the constraint queries below, make sure also change -// the test file in pkg/sql/opt/exec/execbuilder/testdata/sql_activity_stats_compaction. +// +// the test file in pkg/sql/opt/exec/execbuilder/testdata/sql_activity_stats_compaction. var ( stmtStatsCleanupOps = &cleanupOperations{ initialScanStmtTemplate: ` diff --git a/pkg/sql/sqlstats/persistedsqlstats/datadriven_test.go b/pkg/sql/sqlstats/persistedsqlstats/datadriven_test.go index ed8d83fa8007..88e8085100b6 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/datadriven_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/datadriven_test.go @@ -47,23 +47,23 @@ const ( // TestSQLStatsDataDriven runs the data-driven tests in // pkg/sql/sqlstats/persistedsqlstats/testdata. It has the following directives: -// * exec-sql: executes SQL statements in the exec connection. This should be -// executed under a specific app_name in order to get deterministic -// results when testing for stmt/txn statistics. No output will be -// returned for SQL statements executed under this directive. -// * observe-sql: executes SQL statements in the observer connection. This -// should be executed under a different app_name. This is used -// to test the statement/transaction statistics executed under -// exec-sql. Running them in different connection ensures that -// observing statements will not mess up the statistics of the -// statements that they are observing. -// * sql-stats-flush: this triggers the SQL Statistics to be flushed into -// system table. -// * set-time: this changes the clock time perceived by SQL Stats subsystem. -// This is useful when unit tests need to manipulate times. 
-// * should-sample-logical-plan: this checks if the given tuple of -// (db, implicitTxn, fingerprint) will be sampled -// next time it is being executed. +// - exec-sql: executes SQL statements in the exec connection. This should be +// executed under a specific app_name in order to get deterministic +// results when testing for stmt/txn statistics. No output will be +// returned for SQL statements executed under this directive. +// - observe-sql: executes SQL statements in the observer connection. This +// should be executed under a different app_name. This is used +// to test the statement/transaction statistics executed under +// exec-sql. Running them in different connection ensures that +// observing statements will not mess up the statistics of the +// statements that they are observing. +// - sql-stats-flush: this triggers the SQL Statistics to be flushed into +// system table. +// - set-time: this changes the clock time perceived by SQL Stats subsystem. +// This is useful when unit tests need to manipulate times. +// - should-sample-logical-plan: this checks if the given tuple of +// (db, implicitTxn, fingerprint) will be sampled +// next time it is being executed. func TestSQLStatsDataDriven(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/sqlstats/persistedsqlstats/provider.go b/pkg/sql/sqlstats/persistedsqlstats/provider.go index 1f3b93913d97..eaaa3a665ceb 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/provider.go +++ b/pkg/sql/sqlstats/persistedsqlstats/provider.go @@ -164,7 +164,8 @@ func (s *PersistedSQLStats) GetNextFlushAt() time.Time { // nextFlushInterval calculates the wait interval that is between: // [(1 - SQLStatsFlushJitter) * SQLStatsFlushInterval), -// (1 + SQLStatsFlushJitter) * SQLStatsFlushInterval)] +// +// (1 + SQLStatsFlushJitter) * SQLStatsFlushInterval)] func (s *PersistedSQLStats) nextFlushInterval() time.Duration { baseInterval := SQLStatsFlushInterval.Get(&s.cfg.Settings.SV) waitInterval := s.jitterInterval(baseInterval) diff --git a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding.go b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding.go index 6e39a812ee05..ecb549889717 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding.go +++ b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding.go @@ -45,21 +45,22 @@ func ExplainTreePlanNodeToJSON(node *roachpb.ExplainTreePlanNode) json.JSON { // BuildStmtMetadataJSON returns a json.JSON object for the metadata section of // the roachpb.CollectedStatementStatistics. 
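The jittered flush wait described for SQLStatsFlushJitter and nextFlushInterval above, a value drawn from [(1 - jitter) * interval, (1 + jitter) * interval), is a one-liner to compute. This helper is illustrative, not the PersistedSQLStats method:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredInterval returns a wait drawn uniformly from
// [(1 - jitter) * base, (1 + jitter) * base).
func jitteredInterval(base time.Duration, jitter float64) time.Duration {
	frac := 1 + (2*rand.Float64()-1)*jitter // uniform in [1 - jitter, 1 + jitter)
	return time.Duration(frac * float64(base))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jitteredInterval(10*time.Minute, 0.15))
	}
}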
// JSON Schema for statement metadata: -// { -// "$schema": "https://json-schema.org/draft/2020-12/schema", -// "title": "system.statement_statistics.metadata", -// "type": "object", -// "properties": { -// "stmtTyp": { "type": "string" }, -// "query": { "type": "string" }, -// "db": { "type": "string" }, -// "distsql": { "type": "boolean" }, -// "failed": { "type": "boolean" }, -// "implicitTxn": { "type": "boolean" }, -// "vec": { "type": "boolean" }, -// "fullScan": { "type": "boolean" }, -// } -// } +// +// { +// "$schema": "https://json-schema.org/draft/2020-12/schema", +// "title": "system.statement_statistics.metadata", +// "type": "object", +// "properties": { +// "stmtTyp": { "type": "string" }, +// "query": { "type": "string" }, +// "db": { "type": "string" }, +// "distsql": { "type": "boolean" }, +// "failed": { "type": "boolean" }, +// "implicitTxn": { "type": "boolean" }, +// "vec": { "type": "boolean" }, +// "fullScan": { "type": "boolean" }, +// } +// } func BuildStmtMetadataJSON(statistics *roachpb.CollectedStatementStatistics) (json.JSON, error) { return (*stmtStatsMetadata)(statistics).jsonFields().encodeJSON() } @@ -68,85 +69,86 @@ func BuildStmtMetadataJSON(statistics *roachpb.CollectedStatementStatistics) (js // roachpb.CollectedStatementStatistics into a json.JSON object. // // JSON Schema for stats portion: -// { -// "$schema": "https://json-schema.org/draft/2020-12/schema", -// "title": "system.statement_statistics.statistics", -// "type": "object", // -// "definitions": { -// "numeric_stats": { -// "type": "object", -// "properties": { -// "mean": { "type": "number" }, -// "sqDiff": { "type": "number" } -// }, -// "required": ["mean", "sqDiff"] -// }, -// "node_ids": { -// "type": "array", -// "items": { -// "type": "int", -// }, -// }, -// "statistics": { -// "type": "object", -// "properties": { -// "firstAttemptCnt": { "type": "number" }, -// "maxRetries": { "type": "number" }, -// "numRows": { "$ref": "#/definitions/numeric_stats" }, -// "parseLat": { "$ref": "#/definitions/numeric_stats" }, -// "planLat": { "$ref": "#/definitions/numeric_stats" }, -// "runLat": { "$ref": "#/definitions/numeric_stats" }, -// "svcLat": { "$ref": "#/definitions/numeric_stats" }, -// "ovhLat": { "$ref": "#/definitions/numeric_stats" }, -// "bytesRead": { "$ref": "#/definitions/numeric_stats" }, -// "rowsRead": { "$ref": "#/definitions/numeric_stats" } -// "firstExecAt": { "type": "string" }, -// "lastExecAt": { "type": "string" }, -// "nodes": { "type": "node_ids" }, -// }, -// "required": [ -// "firstAttemptCnt", -// "maxRetries", -// "numRows", -// "parseLat", -// "planLat", -// "runLat", -// "svcLat", -// "ovhLat", -// "bytesRead", -// "rowsRead", -// "nodes" -// ] -// }, -// "execution_statistics": { -// "type": "object", -// "properties": { -// "cnt": { "type": "number" }, -// "networkBytes": { "$ref": "#/definitions/numeric_stats" }, -// "maxMemUsage": { "$ref": "#/definitions/numeric_stats" }, -// "contentionTime": { "$ref": "#/definitions/numeric_stats" }, -// "networkMsgs": { "$ref": "#/definitions/numeric_stats" }, -// "maxDiskUsage": { "$ref": "#/definitions/numeric_stats" }, -// }, -// "required": [ -// "cnt", -// "networkBytes", -// "maxMemUsage", -// "contentionTime", -// "networkMsgs", -// "maxDiskUsage", -// ] -// } -// }, +// { +// "$schema": "https://json-schema.org/draft/2020-12/schema", +// "title": "system.statement_statistics.statistics", +// "type": "object", +// +// "definitions": { +// "numeric_stats": { +// "type": "object", +// "properties": { +// 
"mean": { "type": "number" }, +// "sqDiff": { "type": "number" } +// }, +// "required": ["mean", "sqDiff"] +// }, +// "node_ids": { +// "type": "array", +// "items": { +// "type": "int", +// }, +// }, +// "statistics": { +// "type": "object", +// "properties": { +// "firstAttemptCnt": { "type": "number" }, +// "maxRetries": { "type": "number" }, +// "numRows": { "$ref": "#/definitions/numeric_stats" }, +// "parseLat": { "$ref": "#/definitions/numeric_stats" }, +// "planLat": { "$ref": "#/definitions/numeric_stats" }, +// "runLat": { "$ref": "#/definitions/numeric_stats" }, +// "svcLat": { "$ref": "#/definitions/numeric_stats" }, +// "ovhLat": { "$ref": "#/definitions/numeric_stats" }, +// "bytesRead": { "$ref": "#/definitions/numeric_stats" }, +// "rowsRead": { "$ref": "#/definitions/numeric_stats" } +// "firstExecAt": { "type": "string" }, +// "lastExecAt": { "type": "string" }, +// "nodes": { "type": "node_ids" }, +// }, +// "required": [ +// "firstAttemptCnt", +// "maxRetries", +// "numRows", +// "parseLat", +// "planLat", +// "runLat", +// "svcLat", +// "ovhLat", +// "bytesRead", +// "rowsRead", +// "nodes" +// ] +// }, +// "execution_statistics": { +// "type": "object", +// "properties": { +// "cnt": { "type": "number" }, +// "networkBytes": { "$ref": "#/definitions/numeric_stats" }, +// "maxMemUsage": { "$ref": "#/definitions/numeric_stats" }, +// "contentionTime": { "$ref": "#/definitions/numeric_stats" }, +// "networkMsgs": { "$ref": "#/definitions/numeric_stats" }, +// "maxDiskUsage": { "$ref": "#/definitions/numeric_stats" }, +// }, +// "required": [ +// "cnt", +// "networkBytes", +// "maxMemUsage", +// "contentionTime", +// "networkMsgs", +// "maxDiskUsage", +// ] +// } +// }, // -// "properties": { -// "stats": { "$ref": "#/definitions/statistics" }, -// "execStats": { -// "$ref": "#/definitions/execution_statistics" -// } -// } -// } +// "properties": { +// "stats": { "$ref": "#/definitions/statistics" }, +// "execStats": { +// "$ref": "#/definitions/execution_statistics" +// } +// } +// } func BuildStmtStatisticsJSON(statistics *roachpb.StatementStatistics) (json.JSON, error) { return (*stmtStats)(statistics).encodeJSON() } @@ -155,21 +157,23 @@ func BuildStmtStatisticsJSON(statistics *roachpb.StatementStatistics) (json.JSON // roachpb.CollectedTransactionStatistics into a json.JSON object. // // JSON Schema: -// { -// "$schema": "https://json-schema.org/draft/2020-12/schema", -// "title": "system.transaction_statistics.metadata", -// "type": "object", -// "properties": { -// "stmtFingerprintIDs": { -// "type": "array", -// "items": { -// "type": "string" -// } -// }, -// "firstExecAt": { "type": "string" }, -// "lastExecAt": { "type": "string" } -// } -// } +// +// { +// "$schema": "https://json-schema.org/draft/2020-12/schema", +// "title": "system.transaction_statistics.metadata", +// "type": "object", +// "properties": { +// "stmtFingerprintIDs": { +// "type": "array", +// "items": { +// "type": "string" +// } +// }, +// "firstExecAt": { "type": "string" }, +// "lastExecAt": { "type": "string" } +// } +// } +// // TODO(azhng): add `firstExecAt` and `lastExecAt` into the protobuf definition. func BuildTxnMetadataJSON(statistics *roachpb.CollectedTransactionStatistics) (json.JSON, error) { return jsonFields{ @@ -181,69 +185,70 @@ func BuildTxnMetadataJSON(statistics *roachpb.CollectedTransactionStatistics) (j // roachpb.CollectedTransactionStatistics into a json.JSON. 
// // JSON Schema -// { -// "$schema": "https://json-schema.org/draft/2020-12/schema", -// "title": "system.statement_statistics.statistics", -// "type": "object", // -// "definitions": { -// "numeric_stats": { -// "type": "object", -// "properties": { -// "mean": { "type": "number" }, -// "sqDiff": { "type": "number" } -// }, -// "required": ["mean", "sqDiff"] -// }, -// "statistics": { -// "type": "object", -// "properties": { -// "maxRetries": { "type": "number" }, -// "numRows": { "$ref": "#/definitions/numeric_stats" }, -// "svcLat": { "$ref": "#/definitions/numeric_stats" }, -// "retryLat": { "$ref": "#/definitions/numeric_stats" }, -// "commitLat": { "$ref": "#/definitions/numeric_stats" }, -// "bytesRead": { "$ref": "#/definitions/numeric_stats" }, -// "rowsRead": { "$ref": "#/definitions/numeric_stats" } -// }, -// "required": [ -// "maxRetries", -// "numRows", -// "svcLat", -// "retryLat", -// "commitLat", -// "bytesRead", -// "rowsRead", -// ] -// }, -// "execution_statistics": { -// "type": "object", -// "properties": { -// "cnt": { "type": "number" }, -// "networkBytes": { "$ref": "#/definitions/numeric_stats" }, -// "maxMemUsage": { "$ref": "#/definitions/numeric_stats" }, -// "contentionTime": { "$ref": "#/definitions/numeric_stats" }, -// "networkMsg": { "$ref": "#/definitions/numeric_stats" }, -// "maxDiskUsage": { "$ref": "#/definitions/numeric_stats" }, -// }, -// "required": [ -// "cnt", -// "networkBytes", -// "maxMemUsage", -// "contentionTime", -// "networkMsg", -// "maxDiskUsage", -// ] -// } -// }, +// { +// "$schema": "https://json-schema.org/draft/2020-12/schema", +// "title": "system.statement_statistics.statistics", +// "type": "object", +// +// "definitions": { +// "numeric_stats": { +// "type": "object", +// "properties": { +// "mean": { "type": "number" }, +// "sqDiff": { "type": "number" } +// }, +// "required": ["mean", "sqDiff"] +// }, +// "statistics": { +// "type": "object", +// "properties": { +// "maxRetries": { "type": "number" }, +// "numRows": { "$ref": "#/definitions/numeric_stats" }, +// "svcLat": { "$ref": "#/definitions/numeric_stats" }, +// "retryLat": { "$ref": "#/definitions/numeric_stats" }, +// "commitLat": { "$ref": "#/definitions/numeric_stats" }, +// "bytesRead": { "$ref": "#/definitions/numeric_stats" }, +// "rowsRead": { "$ref": "#/definitions/numeric_stats" } +// }, +// "required": [ +// "maxRetries", +// "numRows", +// "svcLat", +// "retryLat", +// "commitLat", +// "bytesRead", +// "rowsRead", +// ] +// }, +// "execution_statistics": { +// "type": "object", +// "properties": { +// "cnt": { "type": "number" }, +// "networkBytes": { "$ref": "#/definitions/numeric_stats" }, +// "maxMemUsage": { "$ref": "#/definitions/numeric_stats" }, +// "contentionTime": { "$ref": "#/definitions/numeric_stats" }, +// "networkMsg": { "$ref": "#/definitions/numeric_stats" }, +// "maxDiskUsage": { "$ref": "#/definitions/numeric_stats" }, +// }, +// "required": [ +// "cnt", +// "networkBytes", +// "maxMemUsage", +// "contentionTime", +// "networkMsg", +// "maxDiskUsage", +// ] +// } +// }, // -// "properties": { -// "stats": { "$ref": "#/definitions/statistics" }, -// "execStats": { -// "$ref": "#/definitions/execution_statistics" -// } -// } -// } +// "properties": { +// "stats": { "$ref": "#/definitions/statistics" }, +// "execStats": { +// "$ref": "#/definitions/execution_statistics" +// } +// } +// } func BuildTxnStatisticsJSON(statistics *roachpb.CollectedTransactionStatistics) (json.JSON, error) { return (*txnStats)(&statistics.Stats).encodeJSON() 
} @@ -251,29 +256,30 @@ func BuildTxnStatisticsJSON(statistics *roachpb.CollectedTransactionStatistics) // BuildStmtDetailsMetadataJSON returns a json.JSON object for the aggregated metadata // roachpb.AggregatedStatementMetadata. // JSON Schema for statement aggregated metadata: -// { -// "$schema": "https://json-schema.org/draft/2020-12/schema", -// "title": "system.statement_statistics.aggregated_metadata", -// "type": "object", // -// "properties": { -// "stmtType": { "type": "string" }, -// "query": { "type": "string" }, -// "querySummary": { "type": "string" }, -// "implicitTxn": { "type": "boolean" }, -// "distSQLCount": { "type": "number" }, -// "failedCount": { "type": "number" }, -// "vecCount": { "type": "number" }, -// "fullScanCount": { "type": "number" }, -// "totalCount": { "type": "number" }, -// "db": { -// "type": "array", -// "items": { -// "type": "string" -// } -// }, -// } -// } +// { +// "$schema": "https://json-schema.org/draft/2020-12/schema", +// "title": "system.statement_statistics.aggregated_metadata", +// "type": "object", +// +// "properties": { +// "stmtType": { "type": "string" }, +// "query": { "type": "string" }, +// "querySummary": { "type": "string" }, +// "implicitTxn": { "type": "boolean" }, +// "distSQLCount": { "type": "number" }, +// "failedCount": { "type": "number" }, +// "vecCount": { "type": "number" }, +// "fullScanCount": { "type": "number" }, +// "totalCount": { "type": "number" }, +// "db": { +// "type": "array", +// "items": { +// "type": "string" +// } +// }, +// } +// } func BuildStmtDetailsMetadataJSON( metadata *roachpb.AggregatedStatementMetadata, ) (json.JSON, error) { diff --git a/pkg/sql/sqlstats/ssprovider.go b/pkg/sql/sqlstats/ssprovider.go index eb4d7b92a66a..59fa3d79fc01 100644 --- a/pkg/sql/sqlstats/ssprovider.go +++ b/pkg/sql/sqlstats/ssprovider.go @@ -103,8 +103,9 @@ type ApplicationStats interface { // IteratorOptions provides the ability to the caller to change how it iterates // the statements and transactions. // TODO(azhng): introduce StartTime and EndTime field so we can implement -// virtual indexes on crdb_internal.{statement,transaction}_statistics -// using the iterators. +// +// virtual indexes on crdb_internal.{statement,transaction}_statistics +// using the iterators. type IteratorOptions struct { // SortedAppNames determines whether or not the application names will be // sorted when iterating through statistics. diff --git a/pkg/sql/sqltelemetry/doc.go b/pkg/sql/sqltelemetry/doc.go index dc752e15e188..4730709b6e8b 100644 --- a/pkg/sql/sqltelemetry/doc.go +++ b/pkg/sql/sqltelemetry/doc.go @@ -14,13 +14,13 @@ for various SQL features. Centralizing the counters in a single place achieves three objectives: -- the comments that accompany the counters enable non-technical users - to comprehend what is being reported without having to read code. + - the comments that accompany the counters enable non-technical users + to comprehend what is being reported without having to read code. -- the counters are placed side-by-side, grouped by category, so as to - enable exploratory discovery of available telemetry. + - the counters are placed side-by-side, grouped by category, so as to + enable exploratory discovery of available telemetry. -- the counters are pre-registered and their unicity is asserted, - so that no two features end up using the same counter name. + - the counters are pre-registered and their unicity is asserted, + so that no two features end up using the same counter name. 
*/ package sqltelemetry diff --git a/pkg/sql/sqltestutils/large_schema.go b/pkg/sql/sqltestutils/large_schema.go index b626faa78851..e50a108fe5f4 100644 --- a/pkg/sql/sqltestutils/large_schema.go +++ b/pkg/sql/sqltestutils/large_schema.go @@ -33,19 +33,21 @@ type GenerateViewBasedGraphSchemaParams struct { // GenerateViewBasedGraphSchema generates a complex nested schema that takes // the following form: -// 1) Tables generated at depth 0 will have NumColumnsPerTable, where -// NumTablesPerDepth tables will be created with the name format: -// table{tableIndex}. -// Columns will have the name format: {tableName}_{columnIndex}. -// 2) All greater than zero depths we will generate views, where NumTablesPerDepth -// views will be generated. The views at a given depth will select from all -// the columns from the views/tables from the previous depth. This means -// a view at depth N will have (NumTablesPerDepth^(depth)) * NumColumnsPerTable -// Each generate view will have the name format: view{depth}_{tableIndex}. -// Columns will have the name format: {viewName}_{columnIndex}. // -// This setup will generate a large number of column references between views, -// as the depth increases. +// 1. Tables generated at depth 0 will have NumColumnsPerTable, where +// NumTablesPerDepth tables will be created with the name format: +// table{tableIndex}. +// Columns will have the name format: {tableName}_{columnIndex}. +// +// 2. All greater than zero depths we will generate views, where NumTablesPerDepth +// views will be generated. The views at a given depth will select from all +// the columns from the views/tables from the previous depth. This means +// a view at depth N will have (NumTablesPerDepth^(depth)) * NumColumnsPerTable +// Each generate view will have the name format: view{depth}_{tableIndex}. +// Columns will have the name format: {viewName}_{columnIndex}. +// +// This setup will generate a large number of column references between views, +// as the depth increases. func GenerateViewBasedGraphSchema( params GenerateViewBasedGraphSchemaParams, ) (parser.Statements, error) { diff --git a/pkg/sql/sqltestutils/telemetry.go b/pkg/sql/sqltestutils/telemetry.go index d3b240364e22..59153767f880 100644 --- a/pkg/sql/sqltestutils/telemetry.go +++ b/pkg/sql/sqltestutils/telemetry.go @@ -43,43 +43,42 @@ import ( // database and a testing diagnostics reporting server. The test implements the // following data-driven commands: // -// - exec +// - exec // -// Executes SQL statements against the database. Outputs no results on -// success. In case of error, outputs the error message. +// Executes SQL statements against the database. Outputs no results on +// success. In case of error, outputs the error message. // -// - feature-allowlist +// - feature-allowlist // -// The input for this command is not SQL, but a list of regular expressions. -// Tests that follow (until the next feature-allowlist command) will only -// output counters that match a regexp in this allow list. +// The input for this command is not SQL, but a list of regular expressions. +// Tests that follow (until the next feature-allowlist command) will only +// output counters that match a regexp in this allow list. // -// - feature-usage, feature-counters +// - feature-usage, feature-counters // -// Executes SQL statements and then outputs the feature counters from the -// allowlist that have been reported to the diagnostic server. 
The first -// variant outputs only the names of the counters that changed; the second -// variant outputs the counts as well. It is necessary to use -// feature-allowlist before these commands to avoid test flakes (e.g. because -// of counters that are changed by looking up descriptors). -// TODO(yuzefovich): counters currently don't really work because they are -// reset before executing every statement by reporter.ReportDiagnostics. +// Executes SQL statements and then outputs the feature counters from the +// allowlist that have been reported to the diagnostic server. The first +// variant outputs only the names of the counters that changed; the second +// variant outputs the counts as well. It is necessary to use +// feature-allowlist before these commands to avoid test flakes (e.g. because +// of counters that are changed by looking up descriptors). +// TODO(yuzefovich): counters currently don't really work because they are +// reset before executing every statement by reporter.ReportDiagnostics. // -// - schema +// - schema // -// Outputs reported schema information. +// Outputs reported schema information. // -// - sql-stats +// - sql-stats // -// Executes SQL statements and then outputs information about reported sql -// statement statistics. +// Executes SQL statements and then outputs information about reported sql +// statement statistics. // -// - rewrite -// -// Installs a rule to rewrite all matches of the regexp in the first -// line to the string in the second line. This is useful to eliminate -// non-determinism in the output. +// - rewrite // +// Installs a rule to rewrite all matches of the regexp in the first +// line to the string in the second line. This is useful to eliminate +// non-determinism in the output. func TelemetryTest(t *testing.T, serverArgs []base.TestServerArgs, testTenant bool) { // Note: these tests cannot be run in parallel (with each other or with other // tests) because telemetry counters are global. diff --git a/pkg/sql/stats/automatic_stats.go b/pkg/sql/stats/automatic_stats.go index c4e63088c79e..24cf7a6204e0 100644 --- a/pkg/sql/stats/automatic_stats.go +++ b/pkg/sql/stats/automatic_stats.go @@ -178,9 +178,9 @@ const ( // we use random number generation to refresh stats with probability // 10/(1M * 0.2) = 0.00005. The general formula is: // -// # rows updated/inserted/deleted -// p = -------------------------------------------------------------------- -// (# rows in table) * (target fraction of rows updated before refresh) +// # rows updated/inserted/deleted +// p = -------------------------------------------------------------------- +// (# rows in table) * (target fraction of rows updated before refresh) // // The existing statistics in the stats cache are used to get the number of // rows in the table. @@ -208,7 +208,6 @@ const ( // metadata to the Refresher thread over a non-blocking buffered channel. The // signaling is best-effort; if the channel is full, the metadata will not be // sent. -// type Refresher struct { log.AmbientContext st *cluster.Settings diff --git a/pkg/sql/stats/automatic_stats_manual_test.go b/pkg/sql/stats/automatic_stats_manual_test.go index 4dabeaa24a02..31e97f8a397c 100644 --- a/pkg/sql/stats/automatic_stats_manual_test.go +++ b/pkg/sql/stats/automatic_stats_manual_test.go @@ -37,21 +37,21 @@ var runManual = flag.Bool( // varying load on the system and prints out the times. 
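As an aside on the refresh-probability formula documented above in pkg/sql/stats/automatic_stats.go: the check amounts to drawing a uniform random number and comparing it against p. Below is a minimal, self-contained sketch of that calculation; shouldRefresh, rowsChanged, rowCount and targetFraction are illustrative names for this sketch, not the actual Refresher fields.

	package main

	import (
		"fmt"
		"math/rand"
	)

	// shouldRefresh sketches the probability check described in the
	// automatic_stats.go comment above:
	//   p = (# rows updated/inserted/deleted) /
	//       ((# rows in table) * (target fraction of rows updated before refresh))
	func shouldRefresh(rnd *rand.Rand, rowsChanged, rowCount int64, targetFraction float64) bool {
		if rowCount == 0 {
			// Sketch-only choice: with no row-count estimate, always refresh.
			return true
		}
		p := float64(rowsChanged) / (float64(rowCount) * targetFraction)
		return rnd.Float64() < p
	}

	func main() {
		rnd := rand.New(rand.NewSource(1))
		// The worked example above: 10 changed rows in a 1M-row table with a
		// 20% target fraction gives p = 10/(1e6*0.2) = 0.00005.
		fmt.Println(shouldRefresh(rnd, 10, 1_000_000, 0.2))
	}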
It should be run on a // lightly loaded system using: // -// make test PKG=./pkg/sql/stats TESTS=AdaptiveThrottling TESTFLAGS='-v --run-manual -logtostderr NONE' +// make test PKG=./pkg/sql/stats TESTS=AdaptiveThrottling TESTFLAGS='-v --run-manual -logtostderr NONE' // // Sample output: // // --- PASS: TestAdaptiveThrottling (114.51s) -// automatic_stats_manual_test.go:72: Populate table took 7.639067726s -// automatic_stats_manual_test.go:72: --- Load 0% --- -// automatic_stats_manual_test.go:72: Create stats took 1.198634729s -// automatic_stats_manual_test.go:72: --- Load 30% --- -// automatic_stats_manual_test.go:72: Create stats took 2.270165784s -// automatic_stats_manual_test.go:72: --- Load 50% --- -// automatic_stats_manual_test.go:72: Create stats took 7.324599981s -// automatic_stats_manual_test.go:72: --- Load 70% --- -// automatic_stats_manual_test.go:72: Create stats took 15.886412857s // +// automatic_stats_manual_test.go:72: Populate table took 7.639067726s +// automatic_stats_manual_test.go:72: --- Load 0% --- +// automatic_stats_manual_test.go:72: Create stats took 1.198634729s +// automatic_stats_manual_test.go:72: --- Load 30% --- +// automatic_stats_manual_test.go:72: Create stats took 2.270165784s +// automatic_stats_manual_test.go:72: --- Load 50% --- +// automatic_stats_manual_test.go:72: Create stats took 7.324599981s +// automatic_stats_manual_test.go:72: --- Load 70% --- +// automatic_stats_manual_test.go:72: Create stats took 15.886412857s func TestAdaptiveThrottling(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/stats/quantile.go b/pkg/sql/stats/quantile.go index e8f68d5e8a66..52d37b4c0c86 100644 --- a/pkg/sql/stats/quantile.go +++ b/pkg/sql/stats/quantile.go @@ -34,11 +34,11 @@ import ( // https://en.wikipedia.org/wiki/Quantile_function for more background. // // We use quantile functions within our modeling for a few reasons: -// * Unlike histograms, quantile functions are independent of the absolute -// counts. They are a "shape" not a "size". -// * Unlike cumulative distribution functions or probability density functions, -// we can always take the definite integral of a quantile function from p=0 to -// p=1. We use this when performing linear regression over quantiles. +// - Unlike histograms, quantile functions are independent of the absolute +// counts. They are a "shape" not a "size". +// - Unlike cumulative distribution functions or probability density functions, +// we can always take the definite integral of a quantile function from p=0 to +// p=1. We use this when performing linear regression over quantiles. // // Type quantile represents a piecewise quantile function with float64 values as // a series of quantilePoints. 
The pieces of the quantile function are line @@ -59,29 +59,29 @@ import ( // // For example, given this population of 10 values: // -// {200, 200, 210, 210, 210, 211, 212, 221, 222, 230} +// {200, 200, 210, 210, 210, 211, 212, 221, 222, 230} // // One possible histogram might be: // -// {{UpperBound: 200, NumRange: 0, NumEq: 2}, -// {UpperBound: 210, NumRange: 0, NumEq: 3}, -// {UpperBound: 220, NumRange: 2, NumEq: 0}, -// {UpperBound: 230, NumRange: 2, NumEq: 1}} +// {{UpperBound: 200, NumRange: 0, NumEq: 2}, +// {UpperBound: 210, NumRange: 0, NumEq: 3}, +// {UpperBound: 220, NumRange: 2, NumEq: 0}, +// {UpperBound: 230, NumRange: 2, NumEq: 1}} // // And the corresponding quantile function would be: // -// {{0, 200}, {0.2, 200}, {0.2, 210}, {0.5, 210}, {0.7, 220}, {0.9, 230}, {1, 230}} +// {{0, 200}, {0.2, 200}, {0.2, 210}, {0.5, 210}, {0.7, 220}, {0.9, 230}, {1, 230}} // -// 230 | *-* -// | / -// 220 | * -// | / -// 210 | o-----* -// | -// 200 o---* -// | -// 190 + - - - - - - - - - - -// 0 .2 .4 .6 .8 1 +// 230 | *-* +// | / +// 220 | * +// | / +// 210 | o-----* +// | +// 200 o---* +// | +// 190 + - - - - - - - - - - +// 0 .2 .4 .6 .8 1 // // All quantile functions and methods treat quantiles as immutable. We always // allocate new quantiles rather than modifying them in-place. @@ -406,11 +406,12 @@ var ( // fromQuantileValue converts from a quantile value back to a datum suitable for // use in a histogram. It is the inverse of toQuantileValue. It differs from // eval.PerformCast in a few ways: -// 1. It supports conversions that are not legal casts (e.g. FLOAT to DATE). -// 2. It errors on NaN and infinite values because they indicate a problem with -// the regression model rather than valid values. -// 3. On overflow or underflow it clamps to maximum or minimum finite values -// rather than failing the conversion (and thus the entire histogram). +// 1. It supports conversions that are not legal casts (e.g. FLOAT to DATE). +// 2. It errors on NaN and infinite values because they indicate a problem with +// the regression model rather than valid values. +// 3. On overflow or underflow it clamps to maximum or minimum finite values +// rather than failing the conversion (and thus the entire histogram). +// // TODO(michae2): Add support for DECIMAL, TIME, TIMETZ, and INTERVAL. func fromQuantileValue(colType *types.T, val float64) (tree.Datum, error) { if math.IsNaN(val) || math.IsInf(val, 0) { diff --git a/pkg/sql/stats/stats_cache.go b/pkg/sql/stats/stats_cache.go index 8cc8e5d8f56e..74e66d38c6b9 100644 --- a/pkg/sql/stats/stats_cache.go +++ b/pkg/sql/stats/stats_cache.go @@ -51,10 +51,13 @@ type TableStatistic struct { // A TableStatisticsCache contains two underlying LRU caches: // (1) A cache of []*TableStatistic objects, keyed by table ID. -// Each entry consists of all the statistics for different columns and -// column groups for the given table. +// +// Each entry consists of all the statistics for different columns and +// column groups for the given table. +// // (2) A cache of *HistogramData objects, keyed by -// HistogramCacheKey{table ID, statistic ID}. +// +// HistogramCacheKey{table ID, statistic ID}. type TableStatisticsCache struct { // NB: This can't be a RWMutex for lookup because UnorderedCache.Get // manipulates an internal LRU list. @@ -364,11 +367,10 @@ func (sc *TableStatisticsCache) lookupStatsLocked( // addCacheEntryLocked creates a new cache entry and retrieves table statistics // from the database. 
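To make the histogram-to-quantile conversion worked through above (pkg/sql/stats/quantile.go) concrete, here is a minimal sketch that reproduces the example's output. The bucket and point types are illustrative stand-ins for this sketch, not the actual histogram or quantile types.

	package main

	import "fmt"

	// bucket and point are illustrative stand-ins for the histogram bucket and
	// quantile point shapes discussed above.
	type bucket struct {
		UpperBound float64
		NumRange   float64
		NumEq      float64
	}

	type point struct{ p, v float64 }

	// toQuantile converts a histogram into quantile-function points by walking
	// the buckets and tracking the cumulative row count.
	func toQuantile(buckets []bucket) []point {
		var total float64
		for _, b := range buckets {
			total += b.NumRange + b.NumEq
		}
		var q []point
		cum := 0.0 // rows accounted for so far
		for i, b := range buckets {
			if i == 0 {
				// The function starts at p=0 on the first upper bound.
				q = append(q, point{0, b.UpperBound})
			} else {
				cum += b.NumRange
				q = append(q, point{cum / total, b.UpperBound})
			}
			if b.NumEq > 0 {
				cum += b.NumEq
				q = append(q, point{cum / total, b.UpperBound})
			}
		}
		return q
	}

	func main() {
		// The histogram from the example above.
		q := toQuantile([]bucket{{200, 0, 2}, {210, 0, 3}, {220, 2, 0}, {230, 2, 1}})
		// Prints [{0 200} {0.2 200} {0.2 210} {0.5 210} {0.7 220} {0.9 230} {1 230}],
		// matching the quantile function shown above.
		fmt.Println(q)
	}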
It does this in a way so that the other goroutines that // need the same stats can wait on us: -// - an cache entry with wait=true is created; -// - mutex is unlocked; -// - stats are retrieved from database: -// - mutex is locked again and the entry is updated. -// +// - an cache entry with wait=true is created; +// - mutex is unlocked; +// - stats are retrieved from database: +// - mutex is locked again and the entry is updated. func (sc *TableStatisticsCache) addCacheEntryLocked( ctx context.Context, tableID descpb.ID, forecast bool, ) (stats []*TableStatistic, err error) { @@ -408,11 +410,10 @@ func (sc *TableStatisticsCache) addCacheEntryLocked( // an existing cache entry. It does this in a way so that the other goroutines // can continue using the stale stats from the existing entry until the new // stats are added: -// - the existing cache entry is retrieved; -// - mutex is unlocked; -// - stats are retrieved from database: -// - mutex is locked again and the entry is updated. -// +// - the existing cache entry is retrieved; +// - mutex is unlocked; +// - stats are retrieved from database: +// - mutex is locked again and the entry is updated. func (sc *TableStatisticsCache) refreshCacheEntry( ctx context.Context, tableID descpb.ID, ts hlc.Timestamp, ) { diff --git a/pkg/sql/tablewriter.go b/pkg/sql/tablewriter.go index 159873519f6f..7353c150d898 100644 --- a/pkg/sql/tablewriter.go +++ b/pkg/sql/tablewriter.go @@ -40,15 +40,16 @@ type expressionCarrier interface { // tableWriter handles writing kvs and forming table rows. // // Usage: -// err := tw.init(txn, evalCtx) -// // Handle err. -// for { -// values := ... -// row, err := tw.row(values) -// // Handle err. -// } -// err := tw.finalize() -// // Handle err. +// +// err := tw.init(txn, evalCtx) +// // Handle err. +// for { +// values := ... +// row, err := tw.row(values) +// // Handle err. +// } +// err := tw.finalize() +// // Handle err. type tableWriter interface { expressionCarrier diff --git a/pkg/sql/tablewriter_upsert_opt.go b/pkg/sql/tablewriter_upsert_opt.go index 30c53a9f7eb7..05fb2f31e201 100644 --- a/pkg/sql/tablewriter_upsert_opt.go +++ b/pkg/sql/tablewriter_upsert_opt.go @@ -30,15 +30,15 @@ import ( // and other upsert operations into the input query, rather than requiring the // upserter to do it. For example: // -// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) -// INSERT INTO abc VALUES (1, 2) ON CONFLICT (a) DO UPDATE SET b=10 +// CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) +// INSERT INTO abc VALUES (1, 2) ON CONFLICT (a) DO UPDATE SET b=10 // // The CBO will generate an input expression similar to this: // -// SELECT ins_a, ins_b, ins_c, fetch_a, fetch_b, fetch_c, 10 AS upd_b -// FROM (VALUES (1, 2, NULL)) AS ins(ins_a, ins_b, ins_c) -// LEFT OUTER JOIN abc AS fetch(fetch_a, fetch_b, fetch_c) -// ON ins_a = fetch_a +// SELECT ins_a, ins_b, ins_c, fetch_a, fetch_b, fetch_c, 10 AS upd_b +// FROM (VALUES (1, 2, NULL)) AS ins(ins_a, ins_b, ins_c) +// LEFT OUTER JOIN abc AS fetch(fetch_a, fetch_b, fetch_c) +// ON ins_a = fetch_a // // The other non-CBO upserters perform custom left lookup joins. However, that // doesn't allow sharing of optimization rules and doesn't work with correlated diff --git a/pkg/sql/tests/kv_test.go b/pkg/sql/tests/kv_test.go index dfee00041bb5..7693b2aeac86 100644 --- a/pkg/sql/tests/kv_test.go +++ b/pkg/sql/tests/kv_test.go @@ -349,29 +349,30 @@ func BenchmarkKV(b *testing.B) { // are small and already fast are not really beneficial to the user. 
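The wait-entry scheme described above for addCacheEntryLocked (pkg/sql/stats/stats_cache.go) can be illustrated with a small, self-contained sketch; statsCache, cacheEntry and fetch are hypothetical names, and a closed channel stands in for the wait=true flag rather than the cache's actual synchronization.

	package main

	import (
		"fmt"
		"sync"
	)

	// cacheEntry holds fetched stats; wait is closed once stats is populated.
	type cacheEntry struct {
		wait  chan struct{}
		stats []string
	}

	type statsCache struct {
		mu      sync.Mutex
		entries map[int]*cacheEntry
	}

	// get follows the pattern sketched above: insert a placeholder entry while
	// holding the mutex, drop the mutex for the (slow) fetch, then re-lock to
	// fill in the entry and wake any goroutines waiting on it.
	func (c *statsCache) get(tableID int, fetch func() []string) []string {
		c.mu.Lock()
		if e, ok := c.entries[tableID]; ok {
			c.mu.Unlock()
			<-e.wait // another goroutine is (or was) fetching these stats
			return e.stats
		}
		e := &cacheEntry{wait: make(chan struct{})}
		c.entries[tableID] = e
		c.mu.Unlock()

		stats := fetch() // done without holding the mutex

		c.mu.Lock()
		e.stats = stats
		close(e.wait)
		c.mu.Unlock()
		return stats
	}

	func main() {
		c := &statsCache{entries: map[int]*cacheEntry{}}
		fmt.Println(c.get(1, func() []string { return []string{"stat-a"} }))
	}

The real cache also handles errors and refreshes of existing entries (see refreshCacheEntry above); the sketch only shows the lock/unlock choreography.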
// Specifically, these transactional update queries run as a 1PC transaction // and do two significant pieces of work in storage: -// - A read-only batch with 100 ScanRequests (this should eventually be -// optimized by SQL to 100 GetRequests -// https://github.com/cockroachdb/cockroach/issues/46758). The spans in the -// batch are in sorted order. At the storage layer, the same iterator is -// reused across the requests in a batch, and results in the following -// sequence of calls repeated a 100 times: SetBounds, SeekGE, . -// The part is looking for the next MVCCKey (not version) within -// the span, and will not find such a key, but needs to step over the -// versions of the key that it did find. This exercises the -// pebbleMVCCScanner's itersBeforeSeek optimization, and will only involve -// Next calls if the versions are <= 5. Else it will Seek after doing Next 5 -// times. That is, if there are k version per key and k <= 5, will -// be k Next calls. If k > 5, there will be 5 Next calls followed by a -// SeekGE. The maxVersions=8 benchmark below has some iterations that will -// need to do this seek. -// - A write batch with 100 PutRequests, again in sorted order. At -// the storage layer, the same iterator will get reused across the requests -// in a batch, and results in 100 SeekPrefixGE calls to that iterator. -// Note that in this case the Distinct batch optimization is not being used. -// Even the experimental approach in -// https://github.com/sumeerbhola/cockroach/commit/eeeec51bd40ef47e743dc0c9ca47cf15710bae09 -// indicates that we cannot use an unindexed Pebble batch (which would have -// been an optimization). +// - A read-only batch with 100 ScanRequests (this should eventually be +// optimized by SQL to 100 GetRequests +// https://github.com/cockroachdb/cockroach/issues/46758). The spans in the +// batch are in sorted order. At the storage layer, the same iterator is +// reused across the requests in a batch, and results in the following +// sequence of calls repeated a 100 times: SetBounds, SeekGE, . +// The part is looking for the next MVCCKey (not version) within +// the span, and will not find such a key, but needs to step over the +// versions of the key that it did find. This exercises the +// pebbleMVCCScanner's itersBeforeSeek optimization, and will only involve +// Next calls if the versions are <= 5. Else it will Seek after doing Next 5 +// times. That is, if there are k version per key and k <= 5, will +// be k Next calls. If k > 5, there will be 5 Next calls followed by a +// SeekGE. The maxVersions=8 benchmark below has some iterations that will +// need to do this seek. +// - A write batch with 100 PutRequests, again in sorted order. At +// the storage layer, the same iterator will get reused across the requests +// in a batch, and results in 100 SeekPrefixGE calls to that iterator. +// Note that in this case the Distinct batch optimization is not being used. +// Even the experimental approach in +// https://github.com/sumeerbhola/cockroach/commit/eeeec51bd40ef47e743dc0c9ca47cf15710bae09 +// indicates that we cannot use an unindexed Pebble batch (which would have +// been an optimization). +// // This workload has keys that are clustered in the storage key space. Also, // the volume of data is small, so the Pebble iterator stack is not deep. Both // these things may not be representative of the real world. 
I like to run diff --git a/pkg/sql/tests/monotonic_insert_test.go b/pkg/sql/tests/monotonic_insert_test.go index 66f26785a469..4482df34b7ae 100644 --- a/pkg/sql/tests/monotonic_insert_test.go +++ b/pkg/sql/tests/monotonic_insert_test.go @@ -74,7 +74,8 @@ type mtClient struct { // TestMonotonicInserts replicates the 'monotonic' test from the Jepsen // CockroachDB test suite: -// https://github.com/jepsen-io/jepsen/blob/master/cockroachdb/src/jepsen/cockroach/monotonic.clj +// +// https://github.com/jepsen-io/jepsen/blob/master/cockroachdb/src/jepsen/cockroach/monotonic.clj func TestMonotonicInserts(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/tests/server_params.go b/pkg/sql/tests/server_params.go index 5b5ca60a6e0a..b1b31b10b54e 100644 --- a/pkg/sql/tests/server_params.go +++ b/pkg/sql/tests/server_params.go @@ -49,20 +49,21 @@ func CreateTestTenantParams(tenantID roachpb.TenantID) base.TestTenantArgs { // CreateTestingKnobs creates a testing knob in the unit tests. // // Note: SQL Stats’s read path uses follower read -// (AS OF SYSTEM TIME follower_read_timestamp()) to ensure that contention -// between reads and writes (SQL Stats flush / SQL Stats cleanup) is -// minimized. -// However, in a new cluster in unit tests, system tables are created -// using the migration framework. The migration framework goes through a -// list of registered migrations and creates the stats system tables. By -// using follower read, we shift the transaction read timestamp far enough -// to the past. This means it is possible in the unit tests, the read -// timestamp would be chosen to be before the creation of the stats table. -// This can cause 'descriptor not found' error when accessing the stats -// system table. -// Additionally, we don't want to completely remove the AOST clause in the -// unit test. Therefore, `AS OF SYSTEM TIME '-1us'` is a compromise -// used to get around the 'descriptor not found' error. +// +// (AS OF SYSTEM TIME follower_read_timestamp()) to ensure that contention +// between reads and writes (SQL Stats flush / SQL Stats cleanup) is +// minimized. +// However, in a new cluster in unit tests, system tables are created +// using the migration framework. The migration framework goes through a +// list of registered migrations and creates the stats system tables. By +// using follower read, we shift the transaction read timestamp far enough +// to the past. This means it is possible in the unit tests, the read +// timestamp would be chosen to be before the creation of the stats table. +// This can cause 'descriptor not found' error when accessing the stats +// system table. +// Additionally, we don't want to completely remove the AOST clause in the +// unit test. Therefore, `AS OF SYSTEM TIME '-1us'` is a compromise +// used to get around the 'descriptor not found' error. func CreateTestingKnobs() base.TestingKnobs { return base.TestingKnobs{ SQLStatsKnobs: &sqlstats.TestingKnobs{ diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index ad4778caa0c1..60bbb5defca2 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -42,8 +42,9 @@ type truncateNode struct { // Truncate deletes all rows from a table. // Privileges: DROP on table. -// Notes: postgres requires TRUNCATE. -// mysql requires DROP (for mysql >= 5.1.16, DELETE before that). +// +// Notes: postgres requires TRUNCATE. +// mysql requires DROP (for mysql >= 5.1.16, DELETE before that). 
func (p *planner) Truncate(ctx context.Context, n *tree.Truncate) (planNode, error) { return &truncateNode{n: n}, nil } diff --git a/pkg/sql/ttl/ttlbase/ttl_helpers.go b/pkg/sql/ttl/ttlbase/ttl_helpers.go index a7d1dac2818a..f445ee21f9a4 100644 --- a/pkg/sql/ttl/ttlbase/ttl_helpers.go +++ b/pkg/sql/ttl/ttlbase/ttl_helpers.go @@ -38,8 +38,9 @@ AND (%s) IN (%s)` // MakeColumnNamesSQL converts columns into an escape string // for an order by clause, e.g.: -// {"a", "b"} => a, b -// {"escape-me", "b"} => "escape-me", b +// +// {"a", "b"} => a, b +// {"escape-me", "b"} => "escape-me", b func MakeColumnNamesSQL(columns []string) string { var b bytes.Buffer for i, pkColumn := range columns { diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index bc5473cb2849..96f87a153df3 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -197,7 +197,9 @@ func checkRestarts(t *testing.T, magicVals *filterVals) { // // The aborter only works with INSERT statements operating on the table t.test // defined as: +// // `CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT)` +// // The TxnAborter runs transactions deleting the row for the `k` that the // trapped transactions were writing to. // diff --git a/pkg/sql/txn_state.go b/pkg/sql/txn_state.go index 42f9024a8c1d..ff7830260a3b 100644 --- a/pkg/sql/txn_state.go +++ b/pkg/sql/txn_state.go @@ -147,21 +147,30 @@ const ( // and returns the ID of the new transaction. // // connCtx: The context in which the new transaction is started (usually a -// connection's context). ts.Ctx will be set to a child context and should be -// used for everything that happens within this SQL transaction. +// +// connection's context). ts.Ctx will be set to a child context and should be +// used for everything that happens within this SQL transaction. +// // txnType: The type of the starting txn. // sqlTimestamp: The timestamp to report for current_timestamp(), now() etc. // historicalTimestamp: If non-nil indicates that the transaction is historical -// and should be fixed to this timestamp. +// +// and should be fixed to this timestamp. +// // priority: The transaction's priority. Pass roachpb.UnspecifiedUserPriority if the txn arg is -// not nil. +// +// not nil. +// // readOnly: The read-only character of the new txn. // txn: If not nil, this txn will be used instead of creating a new txn. If so, -// all the other arguments need to correspond to the attributes of this txn -// (unless otherwise specified). +// +// all the other arguments need to correspond to the attributes of this txn +// (unless otherwise specified). +// // tranCtx: A bag of extra execution context. // qualityOfService: If txn is nil, the QoSLevel/WorkPriority to assign the new -// transaction for use in admission queues. +// +// transaction for use in admission queues. func (ts *txnState) resetForNewSQLTxn( connCtx context.Context, txnType txnType, diff --git a/pkg/sql/types/types.go b/pkg/sql/types/types.go index 497715ab9e61..977a63987188 100644 --- a/pkg/sql/types/types.go +++ b/pkg/sql/types/types.go @@ -36,26 +36,26 @@ import ( // nullable and non-nullable types. It is up to the caller to store that // information separately if it is needed. 
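For the MakeColumnNamesSQL behaviour documented above (pkg/sql/ttl/ttlbase/ttl_helpers.go), a rough sketch of the quoting rule follows; the regexp-based test is a simplification of the real identifier-escaping logic, and simpleIdent/columnNamesSQL are illustrative names only.

	package main

	import (
		"fmt"
		"regexp"
		"strings"
	)

	// simpleIdent loosely matches identifiers that can be emitted unquoted; the
	// real rules (keywords, unicode, etc.) are more involved.
	var simpleIdent = regexp.MustCompile(`^[a-z_][a-z0-9_]*$`)

	// columnNamesSQL mimics the documented behaviour: plain identifiers pass
	// through, anything else is double-quoted (with embedded quotes doubled).
	func columnNamesSQL(columns []string) string {
		parts := make([]string, len(columns))
		for i, c := range columns {
			if simpleIdent.MatchString(c) {
				parts[i] = c
			} else {
				parts[i] = `"` + strings.ReplaceAll(c, `"`, `""`) + `"`
			}
		}
		return strings.Join(parts, ", ")
	}

	func main() {
		fmt.Println(columnNamesSQL([]string{"a", "b"}))         // a, b
		fmt.Println(columnNamesSQL([]string{"escape-me", "b"})) // "escape-me", b
	}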
Here are some example types: // -// INT4 - any 32-bit integer -// DECIMAL(10, 3) - any base-10 value with at most 10 digits, with -// up to 3 to right of decimal point -// FLOAT[] - array of 64-bit IEEE 754 floating-point values -// TUPLE[TIME, VARCHAR(20)] - any pair of values where first value is a time -// of day and the second value is a string having -// up to 20 characters +// INT4 - any 32-bit integer +// DECIMAL(10, 3) - any base-10 value with at most 10 digits, with +// up to 3 to right of decimal point +// FLOAT[] - array of 64-bit IEEE 754 floating-point values +// TUPLE[TIME, VARCHAR(20)] - any pair of values where first value is a time +// of day and the second value is a string having +// up to 20 characters // // Fundamentally, a type consists of the following attributes, each of which has // a corresponding accessor method. Some of these attributes are only defined // for a subset of types. See the method comments for more details. // -// Family - equivalence group of the type (enumeration) -// Oid - Postgres Object ID that describes the type (enumeration) -// Precision - maximum accuracy of the type (numeric) -// Width - maximum size or scale of the type (numeric) -// Locale - location which governs sorting, formatting, etc. (string) -// ArrayContents - array element type (T) -// TupleContents - slice of types of each tuple field ([]*T) -// TupleLabels - slice of labels of each tuple field ([]string) +// Family - equivalence group of the type (enumeration) +// Oid - Postgres Object ID that describes the type (enumeration) +// Precision - maximum accuracy of the type (numeric) +// Width - maximum size or scale of the type (numeric) +// Locale - location which governs sorting, formatting, etc. (string) +// ArrayContents - array element type (T) +// TupleContents - slice of types of each tuple field ([]*T) +// TupleLabels - slice of labels of each tuple field ([]string) // // Some types are not currently allowed as the type of a column (e.g. nested // arrays). Other usages of the types package may have similar restrictions. @@ -878,9 +878,8 @@ func oidCanBeCollatedString(o oid.Oid) bool { // that is collated according to the given locale. The new type is based upon // the given string type, having the same oid and width values. For example: // -// STRING => STRING COLLATE EN -// VARCHAR(20) => VARCHAR(20) COLLATE EN -// +// STRING => STRING COLLATE EN +// VARCHAR(20) => VARCHAR(20) COLLATE EN func MakeCollatedString(strType *T, locale string) *T { if oidCanBeCollatedString(strType.Oid()) { return &T{InternalType: InternalType{ @@ -1202,12 +1201,12 @@ func (t *T) Locale() string { // Width is the size or scale of the type, such as number of bits or characters. // -// INT : # of bits (64, 32, 16) -// FLOAT : # of bits (64, 32) -// DECIMAL : max # of digits after decimal point (must be <= Precision) -// STRING : max # of characters -// COLLATEDSTRING: max # of characters -// BIT : max # of bits +// INT : # of bits (64, 32, 16) +// FLOAT : # of bits (64, 32) +// DECIMAL : max # of digits after decimal point (must be <= Precision) +// STRING : max # of characters +// COLLATEDSTRING: max # of characters +// BIT : max # of bits // // Width is always 0 for other types. func (t *T) Width() int32 { @@ -1216,12 +1215,12 @@ func (t *T) Width() int32 { // Precision is the accuracy of the data type. 
// -// DECIMAL : max # digits (must be >= Width/Scale) -// INTERVAL : max # fractional second digits -// TIME : max # fractional second digits -// TIMETZ : max # fractional second digits -// TIMESTAMP : max # fractional second digits -// TIMESTAMPTZ: max # fractional second digits +// DECIMAL : max # digits (must be >= Width/Scale) +// INTERVAL : max # fractional second digits +// TIME : max # fractional second digits +// TIMETZ : max # fractional second digits +// TIMESTAMP : max # fractional second digits +// TIMESTAMPTZ: max # fractional second digits // // Precision for time-related families has special rules for 0 -- see // `precision_is_set` on the `InternalType` proto. @@ -1517,13 +1516,12 @@ func (t *T) Name() string { // than the native CRDB name for it (i.e. the Name function). It is used when // compatibility with PG is important. Examples of differences: // -// Name() PGName() -// -------------------------- -// char bpchar -// "char" char -// bytes bytea -// int4[] _int4 -// +// Name() PGName() +// -------------------------- +// char bpchar +// "char" char +// bytes bytea +// int4[] _int4 func (t *T) PGName() string { name, ok := oidext.TypeName(t.Oid()) if ok { @@ -1547,8 +1545,7 @@ func (t *T) PGName() string { // standard (or by Postgres for any non-standard types). This can be looked up // for any type in Postgres using a query similar to this: // -// SELECT format_type(pg_typeof(1::int)::regtype, NULL) -// +// SELECT format_type(pg_typeof(1::int)::regtype, NULL) func (t *T) SQLStandardName() string { return t.SQLStandardNameWithTypmod(false, 0) } @@ -1564,7 +1561,7 @@ func (t *T) TelemetryName() string { // typmod argument, and a boolean which indicates whether or not a typmod was // even specified. The expected results of this function should be, in Postgres: // -// SELECT format_type('thetype'::regype, typmod) +// SELECT format_type('thetype'::regype, typmod) // // Generally, what this does with a non-0 typmod is append the scale, precision // or length of a datatype to the name of the datatype. For example, a @@ -2093,8 +2090,8 @@ func (t *InternalType) Identical(other *InternalType) bool { // protobuf serialization rules. It is backwards-compatible with formats used // by older versions of CRDB. // -// var t T -// err := protoutil.Unmarshal(data, &t) +// var t T +// err := protoutil.Unmarshal(data, &t) // // Unmarshal is part of the protoutil.Message interface. func (t *T) Unmarshal(data []byte) error { @@ -2307,8 +2304,7 @@ func (t *T) upgradeType() error { // version of CRDB so that clusters can run in mixed version mode during // upgrade. // -// bytes, err := protoutil.Marshal(&typ) -// +// bytes, err := protoutil.Marshal(&typ) func (t *T) Marshal() (data []byte, err error) { // First downgrade to a struct that will be serialized in a backwards- // compatible bytes format. @@ -2647,9 +2643,8 @@ func IsWildcardTupleType(t *T) bool { // or []COLLATEDSTRING type. This is tricky in the case of an array of collated // string, since brackets must precede the COLLATE identifier: // -// STRING COLLATE EN -// VARCHAR(20)[] COLLATE DE -// +// STRING COLLATE EN +// VARCHAR(20)[] COLLATE DE func (t *T) collatedStringTypeSQL(isArray bool) string { var buf bytes.Buffer buf.WriteString(t.stringTypeSQL()) @@ -2721,9 +2716,9 @@ func init() { // TypeForNonKeywordTypeName returns the column type for the string name of a // type, if one exists. The third return value indicates: // -// 0 if no error or the type is not known in postgres. -// -1 if the type is known in postgres. 
-// >0 for a github issue number. +// 0 if no error or the type is not known in postgres. +// -1 if the type is known in postgres. +// >0 for a github issue number. func TypeForNonKeywordTypeName(name string) (*T, bool, int) { t, ok := typNameLiterals[name] if ok { diff --git a/pkg/sql/union.go b/pkg/sql/union.go index c3dc105087f3..05c544343dc6 100644 --- a/pkg/sql/union.go +++ b/pkg/sql/union.go @@ -53,11 +53,12 @@ import ( // 1 // There are three 1s on the left and two 1s on the right, so we emit 1, 1. // Nothing else is in both. -// emitRight: For each row, increment the map entry. -// emitLeft: For each row, if the row is not present in the map, it was not in -// both, don't emit. Otherwise, if the count for the row was > 0, emit and -// decrement the entry. Otherwise, the row was on the right, but we've -// already emitted as many as were on the right, don't emit. +// +// emitRight: For each row, increment the map entry. +// emitLeft: For each row, if the row is not present in the map, it was not in +// both, don't emit. Otherwise, if the count for the row was > 0, emit and +// decrement the entry. Otherwise, the row was on the right, but we've +// already emitted as many as were on the right, don't emit. type unionNode struct { // right and left are the data source operands. // right is read first, to populate the `emit` field. diff --git a/pkg/sql/user.go b/pkg/sql/user.go index b7cbb4d3234e..1720839cfced 100644 --- a/pkg/sql/user.go +++ b/pkg/sql/user.go @@ -51,32 +51,32 @@ import ( // The function is tolerant of unavailable clusters (or unavailable // system database) as follows: // -// - if the user is root, the user is reported to exist immediately -// without querying system.users at all. The password retrieval -// is delayed until actually needed by the authentication method. -// This way, if the client presents a valid TLS certificate -// the password is not even needed at all. This is useful for e.g. -// `cockroach node status`. +// - if the user is root, the user is reported to exist immediately +// without querying system.users at all. The password retrieval +// is delayed until actually needed by the authentication method. +// This way, if the client presents a valid TLS certificate +// the password is not even needed at all. This is useful for e.g. +// `cockroach node status`. // -// If root is forced to use a password (e.g. logging in onto the UI) -// then a user login timeout greater than 5 seconds is also -// ignored. This ensures that root has a modicum of comfort -// logging into an unavailable cluster. +// If root is forced to use a password (e.g. logging in onto the UI) +// then a user login timeout greater than 5 seconds is also +// ignored. This ensures that root has a modicum of comfort +// logging into an unavailable cluster. // -// TODO(knz): this does not yet quite work because even if the pw -// auth on the UI succeeds writing to system.web_sessions will still -// stall on an unavailable cluster and prevent root from logging in. +// TODO(knz): this does not yet quite work because even if the pw +// auth on the UI succeeds writing to system.web_sessions will still +// stall on an unavailable cluster and prevent root from logging in. // -// - if the user is another user than root, then the function fails -// after a timeout instead of blocking. The timeout is configurable -// via the cluster setting server.user_login.timeout. Note that this -// is a single timeout for looking up the password, role options, and -// default session variable settings. 
+// - if the user is another user than root, then the function fails +// after a timeout instead of blocking. The timeout is configurable +// via the cluster setting server.user_login.timeout. Note that this +// is a single timeout for looking up the password, role options, and +// default session variable settings. // -// - there is a cache for the the information from system.users, -// system.role_options, and system.database_role_settings. As long as the -// lookup succeeded before and there haven't been any CREATE/ALTER/DROP ROLE -// commands since, then the cache is used without a KV lookup. +// - there is a cache for the the information from system.users, +// system.role_options, and system.database_role_settings. As long as the +// lookup succeeded before and there haven't been any CREATE/ALTER/DROP ROLE +// commands since, then the cache is used without a KV lookup. func GetUserSessionInitInfo( ctx context.Context, execCfg *ExecutorConfig, diff --git a/pkg/sql/virtual_table.go b/pkg/sql/virtual_table.go index 30b656d465e2..1147e79bc6b0 100644 --- a/pkg/sql/virtual_table.go +++ b/pkg/sql/virtual_table.go @@ -62,10 +62,10 @@ type virtualTableGeneratorResponse struct { // setupGenerator takes in a worker that generates rows eagerly and transforms // it into a lazy row generator. It returns two functions: -// * next: A handle that can be called to generate a row from the worker. Next -// cannot be called once cleanup has been called. -// * cleanup: Performs all cleanup. This function must be called exactly once -// to ensure that resources are cleaned up. +// - next: A handle that can be called to generate a row from the worker. Next +// cannot be called once cleanup has been called. +// - cleanup: Performs all cleanup. This function must be called exactly once +// to ensure that resources are cleaned up. 
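The next/cleanup contract described above for setupGenerator (pkg/sql/virtual_table.go) is essentially an eager, push-style producer wrapped into a pull-style iterator. Below is a minimal sketch of that shape using a goroutine and channels; lazyRows and its string rows are illustrative only, not the actual rowPusher machinery.

	package main

	import "fmt"

	// lazyRows wraps an eager, push-style worker into a pull-style pair of
	// functions: next returns one row at a time, and cleanup must be called
	// exactly once so the worker goroutine can exit.
	func lazyRows(worker func(push func(string))) (next func() (string, bool), cleanup func()) {
		rows := make(chan string)
		done := make(chan struct{})
		go func() {
			defer close(rows)
			worker(func(row string) {
				select {
				case rows <- row:
				case <-done:
				}
			})
		}()
		next = func() (string, bool) {
			r, ok := <-rows
			return r, ok
		}
		cleanup = func() {
			close(done)
			for range rows {
				// Drain any remaining rows so the worker can finish.
			}
		}
		return next, cleanup
	}

	func main() {
		next, cleanup := lazyRows(func(push func(string)) {
			for i := 0; i < 3; i++ {
				push(fmt.Sprintf("row-%d", i))
			}
		})
		defer cleanup()
		for {
			row, ok := next()
			if !ok {
				break
			}
			fmt.Println(row)
		}
	}

The real implementation additionally threads a context and returns errors (as its signature below shows), but the push/pull inversion is the part the comment above describes.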
func setupGenerator( ctx context.Context, worker func(ctx context.Context, pusher rowPusher) error, diff --git a/pkg/sql/vtable/pg_catalog.go b/pkg/sql/vtable/pg_catalog.go index 09ebaa1293e7..d4bdbd26a16a 100644 --- a/pkg/sql/vtable/pg_catalog.go +++ b/pkg/sql/vtable/pg_catalog.go @@ -967,7 +967,7 @@ CREATE TABLE pg_catalog.pg_statistic_ext ( stxkind "char"[] )` -//PgCatalogSequences is an empty table in the pg_catalog that is not implemented yet +// PgCatalogSequences is an empty table in the pg_catalog that is not implemented yet const PgCatalogSequences = ` CREATE TABLE pg_catalog.pg_sequences ( schemaname NAME, @@ -983,7 +983,7 @@ CREATE TABLE pg_catalog.pg_sequences ( last_value INT )` -//PgCatalogStatDatabaseConflicts is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatDatabaseConflicts is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatDatabaseConflicts = ` CREATE TABLE pg_catalog.pg_stat_database_conflicts ( datid OID, @@ -995,14 +995,14 @@ CREATE TABLE pg_catalog.pg_stat_database_conflicts ( confl_deadlock INT )` -//PgCatalogReplicationOrigin is an empty table in the pg_catalog that is not implemented yet +// PgCatalogReplicationOrigin is an empty table in the pg_catalog that is not implemented yet const PgCatalogReplicationOrigin = ` CREATE TABLE pg_catalog.pg_replication_origin ( roident OID, roname STRING )` -//PgCatalogStatistic is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatistic is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatistic = ` CREATE TABLE pg_catalog.pg_statistic ( starelid OID, @@ -1038,7 +1038,7 @@ CREATE TABLE pg_catalog.pg_statistic ( stavalues5 STRING[] )` -//PgCatalogStatXactSysTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatXactSysTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatXactSysTables = ` CREATE TABLE pg_catalog.pg_stat_xact_sys_tables ( relid OID, @@ -1054,7 +1054,7 @@ CREATE TABLE pg_catalog.pg_stat_xact_sys_tables ( n_tup_hot_upd INT )` -//PgCatalogAmop is an empty table in the pg_catalog that is not implemented yet +// PgCatalogAmop is an empty table in the pg_catalog that is not implemented yet const PgCatalogAmop = ` CREATE TABLE pg_catalog.pg_amop ( oid OID, @@ -1068,7 +1068,7 @@ CREATE TABLE pg_catalog.pg_amop ( amopsortfamily OID )` -//PgCatalogStatProgressVacuum is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatProgressVacuum is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatProgressVacuum = ` CREATE TABLE pg_catalog.pg_stat_progress_vacuum ( pid INT4, @@ -1084,7 +1084,7 @@ CREATE TABLE pg_catalog.pg_stat_progress_vacuum ( num_dead_tuples INT )` -//PgCatalogStatSysIndexes is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatSysIndexes is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatSysIndexes = ` CREATE TABLE pg_catalog.pg_stat_sys_indexes ( relid OID, @@ -1097,7 +1097,7 @@ CREATE TABLE pg_catalog.pg_stat_sys_indexes ( idx_tup_fetch INT )` -//PgCatalogStatioAllTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioAllTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioAllTables = ` CREATE TABLE pg_catalog.pg_statio_all_tables ( relid OID, @@ -1113,7 +1113,7 @@ CREATE TABLE pg_catalog.pg_statio_all_tables ( tidx_blks_hit INT )` 
-//PgCatalogTsTemplate is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTsTemplate is an empty table in the pg_catalog that is not implemented yet const PgCatalogTsTemplate = ` CREATE TABLE pg_catalog.pg_ts_template ( oid OID, @@ -1123,7 +1123,7 @@ CREATE TABLE pg_catalog.pg_ts_template ( tmpllexize REGPROC )` -//PgCatalogPublicationRel is an empty table in the pg_catalog that is not implemented yet +// PgCatalogPublicationRel is an empty table in the pg_catalog that is not implemented yet const PgCatalogPublicationRel = ` CREATE TABLE pg_catalog.pg_publication_rel ( oid OID, @@ -1131,7 +1131,7 @@ CREATE TABLE pg_catalog.pg_publication_rel ( prrelid OID )` -//PgCatalogAvailableExtensionVersions is an empty table in the pg_catalog that is not implemented yet +// PgCatalogAvailableExtensionVersions is an empty table in the pg_catalog that is not implemented yet const PgCatalogAvailableExtensionVersions = ` CREATE TABLE pg_catalog.pg_available_extension_versions ( name NAME, @@ -1145,7 +1145,7 @@ CREATE TABLE pg_catalog.pg_available_extension_versions ( comment STRING )` -//PgCatalogStatReplication is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatReplication is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatReplication = ` CREATE TABLE pg_catalog.pg_stat_replication ( pid INT4, @@ -1170,7 +1170,7 @@ CREATE TABLE pg_catalog.pg_stat_replication ( reply_time TIMESTAMPTZ )` -//PgCatalogOpfamily is an empty table in the pg_catalog that is not implemented yet +// PgCatalogOpfamily is an empty table in the pg_catalog that is not implemented yet const PgCatalogOpfamily = ` CREATE TABLE pg_catalog.pg_opfamily ( oid OID, @@ -1180,7 +1180,7 @@ CREATE TABLE pg_catalog.pg_opfamily ( opfowner OID )` -//PgCatalogStatioAllSequences is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioAllSequences is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioAllSequences = ` CREATE TABLE pg_catalog.pg_statio_all_sequences ( relid OID, @@ -1190,7 +1190,7 @@ CREATE TABLE pg_catalog.pg_statio_all_sequences ( blks_hit INT )` -//PgCatalogInitPrivs is an empty table in the pg_catalog that is not implemented yet +// PgCatalogInitPrivs is an empty table in the pg_catalog that is not implemented yet const PgCatalogInitPrivs = ` CREATE TABLE pg_catalog.pg_init_privs ( objoid OID, @@ -1200,7 +1200,7 @@ CREATE TABLE pg_catalog.pg_init_privs ( initprivs STRING[] )` -//PgCatalogStatProgressCreateIndex is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatProgressCreateIndex is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatProgressCreateIndex = ` CREATE TABLE pg_catalog.pg_stat_progress_create_index ( pid INT4, @@ -1221,7 +1221,7 @@ CREATE TABLE pg_catalog.pg_stat_progress_create_index ( partitions_done INT )` -//PgCatalogUserMappings is an empty table in the pg_catalog that is not implemented yet +// PgCatalogUserMappings is an empty table in the pg_catalog that is not implemented yet const PgCatalogUserMappings = ` CREATE TABLE pg_catalog.pg_user_mappings ( umid OID, @@ -1232,7 +1232,7 @@ CREATE TABLE pg_catalog.pg_user_mappings ( umoptions STRING[] )` -//PgCatalogStatGssapi is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatGssapi is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatGssapi = ` CREATE TABLE pg_catalog.pg_stat_gssapi ( pid INT4, @@ -1241,7 
+1241,7 @@ CREATE TABLE pg_catalog.pg_stat_gssapi ( encrypted BOOL )` -//PgCatalogPolicies is an empty table in the pg_catalog that is not implemented yet +// PgCatalogPolicies is an empty table in the pg_catalog that is not implemented yet const PgCatalogPolicies = ` CREATE TABLE pg_catalog.pg_policies ( schemaname NAME, @@ -1254,7 +1254,7 @@ CREATE TABLE pg_catalog.pg_policies ( with_check STRING )` -//PgCatalogStatsExt is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatsExt is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatsExt = ` CREATE TABLE pg_catalog.pg_stats_ext ( schemaname NAME, @@ -1272,7 +1272,7 @@ CREATE TABLE pg_catalog.pg_stats_ext ( most_common_base_freqs FLOAT[] )` -//PgCatalogTimezoneAbbrevs is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTimezoneAbbrevs is an empty table in the pg_catalog that is not implemented yet const PgCatalogTimezoneAbbrevs = ` CREATE TABLE pg_catalog.pg_timezone_abbrevs ( abbrev STRING, @@ -1280,7 +1280,7 @@ CREATE TABLE pg_catalog.pg_timezone_abbrevs ( is_dst BOOL )` -//PgCatalogStatSysTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatSysTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatSysTables = ` CREATE TABLE pg_catalog.pg_stat_sys_tables ( relid OID, @@ -1308,7 +1308,7 @@ CREATE TABLE pg_catalog.pg_stat_sys_tables ( autoanalyze_count INT )` -//PgCatalogStatioSysSequences is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioSysSequences is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioSysSequences = ` CREATE TABLE pg_catalog.pg_statio_sys_sequences ( relid OID, @@ -1318,7 +1318,7 @@ CREATE TABLE pg_catalog.pg_statio_sys_sequences ( blks_hit INT )` -//PgCatalogStatDatabase is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatDatabase is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatDatabase = ` CREATE TABLE pg_catalog.pg_stat_database ( datid OID, @@ -1344,7 +1344,7 @@ CREATE TABLE pg_catalog.pg_stat_database ( stats_reset TIMESTAMPTZ )` -//PgCatalogStatioUserIndexes is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioUserIndexes is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioUserIndexes = ` CREATE TABLE pg_catalog.pg_statio_user_indexes ( relid OID, @@ -1356,7 +1356,7 @@ CREATE TABLE pg_catalog.pg_statio_user_indexes ( idx_blks_hit INT )` -//PgCatalogStatSsl is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatSsl is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatSsl = ` CREATE TABLE pg_catalog.pg_stat_ssl ( pid INT4, @@ -1370,7 +1370,7 @@ CREATE TABLE pg_catalog.pg_stat_ssl ( issuer_dn STRING )` -//PgCatalogStatioAllIndexes is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioAllIndexes is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioAllIndexes = ` CREATE TABLE pg_catalog.pg_statio_all_indexes ( relid OID, @@ -1382,7 +1382,7 @@ CREATE TABLE pg_catalog.pg_statio_all_indexes ( idx_blks_hit INT )` -//PgCatalogTsConfig is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTsConfig is an empty table in the pg_catalog that is not implemented yet const PgCatalogTsConfig = ` CREATE TABLE pg_catalog.pg_ts_config ( oid OID, @@ -1392,7 +1392,7 
@@ CREATE TABLE pg_catalog.pg_ts_config ( cfgparser OID )` -//PgCatalogStats is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStats is an empty table in the pg_catalog that is not implemented yet const PgCatalogStats = ` CREATE TABLE pg_catalog.pg_stats ( schemaname NAME, @@ -1411,7 +1411,7 @@ CREATE TABLE pg_catalog.pg_stats ( elem_count_histogram FLOAT4[] )` -//PgCatalogStatAllTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatAllTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatAllTables = ` CREATE TABLE pg_catalog.pg_stat_all_tables ( relid OID, @@ -1439,7 +1439,7 @@ CREATE TABLE pg_catalog.pg_stat_all_tables ( autoanalyze_count INT )` -//PgCatalogStatioSysTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioSysTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioSysTables = ` CREATE TABLE pg_catalog.pg_statio_sys_tables ( relid OID, @@ -1455,7 +1455,7 @@ CREATE TABLE pg_catalog.pg_statio_sys_tables ( tidx_blks_hit INT )` -//PgCatalogStatXactUserFunctions is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatXactUserFunctions is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatXactUserFunctions = ` CREATE TABLE pg_catalog.pg_stat_xact_user_functions ( funcid OID, @@ -1466,7 +1466,7 @@ CREATE TABLE pg_catalog.pg_stat_xact_user_functions ( self_time FLOAT )` -//PgCatalogStatUserFunctions is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatUserFunctions is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatUserFunctions = ` CREATE TABLE pg_catalog.pg_stat_user_functions ( funcid OID, @@ -1477,7 +1477,7 @@ CREATE TABLE pg_catalog.pg_stat_user_functions ( self_time FLOAT )` -//PgCatalogStatProgressBasebackup is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatProgressBasebackup is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatProgressBasebackup = ` CREATE TABLE pg_catalog.pg_stat_progress_basebackup ( pid INT4, @@ -1488,7 +1488,7 @@ CREATE TABLE pg_catalog.pg_stat_progress_basebackup ( tablespaces_streamed INT )` -//PgCatalogPolicy is an empty table in the pg_catalog that is not implemented yet +// PgCatalogPolicy is an empty table in the pg_catalog that is not implemented yet const PgCatalogPolicy = ` CREATE TABLE pg_catalog.pg_policy ( oid OID, @@ -1501,7 +1501,7 @@ CREATE TABLE pg_catalog.pg_policy ( polwithcheck STRING )` -//PgCatalogStatArchiver is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatArchiver is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatArchiver = ` CREATE TABLE pg_catalog.pg_stat_archiver ( archived_count INT, @@ -1513,7 +1513,7 @@ CREATE TABLE pg_catalog.pg_stat_archiver ( stats_reset TIMESTAMPTZ )` -//PgCatalogStatXactAllTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatXactAllTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatXactAllTables = ` CREATE TABLE pg_catalog.pg_stat_xact_all_tables ( relid OID, @@ -1529,7 +1529,7 @@ CREATE TABLE pg_catalog.pg_stat_xact_all_tables ( n_tup_hot_upd INT )` -//PgCatalogHbaFileRules is an empty table in the pg_catalog that is not implemented yet +// PgCatalogHbaFileRules is an empty table in the pg_catalog that is not implemented yet 
const PgCatalogHbaFileRules = ` CREATE TABLE pg_catalog.pg_hba_file_rules ( line_number INT4, @@ -1543,7 +1543,7 @@ CREATE TABLE pg_catalog.pg_hba_file_rules ( error STRING )` -//PgCatalogPublication is an empty table in the pg_catalog that is not implemented yet +// PgCatalogPublication is an empty table in the pg_catalog that is not implemented yet const PgCatalogPublication = ` CREATE TABLE pg_catalog.pg_publication ( oid OID, @@ -1557,7 +1557,7 @@ CREATE TABLE pg_catalog.pg_publication ( pubviaroot BOOL )` -//PgCatalogAmproc is an empty table in the pg_catalog that is not implemented yet +// PgCatalogAmproc is an empty table in the pg_catalog that is not implemented yet const PgCatalogAmproc = ` CREATE TABLE pg_catalog.pg_amproc ( oid OID, @@ -1568,7 +1568,7 @@ CREATE TABLE pg_catalog.pg_amproc ( amproc REGPROC )` -//PgCatalogStatProgressAnalyze is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatProgressAnalyze is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatProgressAnalyze = ` CREATE TABLE pg_catalog.pg_stat_progress_analyze ( pid INT4, @@ -1585,7 +1585,7 @@ CREATE TABLE pg_catalog.pg_stat_progress_analyze ( current_child_table_relid OID )` -//PgCatalogStatSlru is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatSlru is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatSlru = ` CREATE TABLE pg_catalog.pg_stat_slru ( name STRING, @@ -1599,7 +1599,7 @@ CREATE TABLE pg_catalog.pg_stat_slru ( stats_reset TIMESTAMPTZ )` -//PgCatalogFileSettings is an empty table in the pg_catalog that is not implemented yet +// PgCatalogFileSettings is an empty table in the pg_catalog that is not implemented yet const PgCatalogFileSettings = ` CREATE TABLE pg_catalog.pg_file_settings ( sourcefile STRING, @@ -1611,7 +1611,7 @@ CREATE TABLE pg_catalog.pg_file_settings ( error STRING )` -//PgCatalogCursors is an empty table in the pg_catalog that is not implemented yet +// PgCatalogCursors is an empty table in the pg_catalog that is not implemented yet const PgCatalogCursors = ` CREATE TABLE pg_catalog.pg_cursors ( name STRING, @@ -1622,7 +1622,7 @@ CREATE TABLE pg_catalog.pg_cursors ( creation_time TIMESTAMPTZ )` -//PgCatalogRules is an empty table in the pg_catalog that is not implemented yet +// PgCatalogRules is an empty table in the pg_catalog that is not implemented yet const PgCatalogRules = ` CREATE TABLE pg_catalog.pg_rules ( schemaname NAME, @@ -1631,7 +1631,7 @@ CREATE TABLE pg_catalog.pg_rules ( definition STRING )` -//PgCatalogStatioUserSequences is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioUserSequences is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioUserSequences = ` CREATE TABLE pg_catalog.pg_statio_user_sequences ( relid OID, @@ -1641,7 +1641,7 @@ CREATE TABLE pg_catalog.pg_statio_user_sequences ( blks_hit INT )` -//PgCatalogStatUserIndexes is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatUserIndexes is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatUserIndexes = ` CREATE TABLE pg_catalog.pg_stat_user_indexes ( relid OID, @@ -1654,7 +1654,7 @@ CREATE TABLE pg_catalog.pg_stat_user_indexes ( idx_tup_fetch INT )` -//PgCatalogStatXactUserTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatXactUserTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatXactUserTables = 
` CREATE TABLE pg_catalog.pg_stat_xact_user_tables ( relid OID, @@ -1670,7 +1670,7 @@ CREATE TABLE pg_catalog.pg_stat_xact_user_tables ( n_tup_hot_upd INT )` -//PgCatalogPublicationTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogPublicationTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogPublicationTables = ` CREATE TABLE pg_catalog.pg_publication_tables ( pubname NAME, @@ -1678,7 +1678,7 @@ CREATE TABLE pg_catalog.pg_publication_tables ( tablename NAME )` -//PgCatalogStatProgressCluster is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatProgressCluster is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatProgressCluster = ` CREATE TABLE pg_catalog.pg_stat_progress_cluster ( pid INT4, @@ -1695,7 +1695,7 @@ CREATE TABLE pg_catalog.pg_stat_progress_cluster ( index_rebuild_count INT )` -//PgCatalogGroup is an empty table in the pg_catalog that is not implemented yet +// PgCatalogGroup is an empty table in the pg_catalog that is not implemented yet const PgCatalogGroup = ` CREATE TABLE pg_catalog.pg_group ( groname NAME, @@ -1703,7 +1703,7 @@ CREATE TABLE pg_catalog.pg_group ( grolist OID[] )` -//PgCatalogStatAllIndexes is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatAllIndexes is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatAllIndexes = ` CREATE TABLE pg_catalog.pg_stat_all_indexes ( relid OID, @@ -1716,7 +1716,7 @@ CREATE TABLE pg_catalog.pg_stat_all_indexes ( idx_tup_fetch INT )` -//PgCatalogTsConfigMap is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTsConfigMap is an empty table in the pg_catalog that is not implemented yet const PgCatalogTsConfigMap = ` CREATE TABLE pg_catalog.pg_ts_config_map ( mapcfg OID, @@ -1725,7 +1725,7 @@ CREATE TABLE pg_catalog.pg_ts_config_map ( mapdict OID )` -//PgCatalogStatBgwriter is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatBgwriter is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatBgwriter = ` CREATE TABLE pg_catalog.pg_stat_bgwriter ( checkpoints_timed INT, @@ -1741,7 +1741,7 @@ CREATE TABLE pg_catalog.pg_stat_bgwriter ( stats_reset TIMESTAMPTZ )` -//PgCatalogTransform is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTransform is an empty table in the pg_catalog that is not implemented yet const PgCatalogTransform = ` CREATE TABLE pg_catalog.pg_transform ( oid OID, @@ -1751,7 +1751,7 @@ CREATE TABLE pg_catalog.pg_transform ( trftosql REGPROC )` -//PgCatalogTsParser is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTsParser is an empty table in the pg_catalog that is not implemented yet const PgCatalogTsParser = ` CREATE TABLE pg_catalog.pg_ts_parser ( oid OID, @@ -1764,7 +1764,7 @@ CREATE TABLE pg_catalog.pg_ts_parser ( prslextype REGPROC )` -//PgCatalogStatisticExtData is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatisticExtData is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatisticExtData = ` CREATE TABLE pg_catalog.pg_statistic_ext_data ( stxoid OID, @@ -1773,7 +1773,7 @@ CREATE TABLE pg_catalog.pg_statistic_ext_data ( stxdmcv BYTES )` -//PgCatalogLargeobjectMetadata is an empty table in the pg_catalog that is not implemented yet +// PgCatalogLargeobjectMetadata is an empty table in the pg_catalog that is not implemented yet const 
PgCatalogLargeobjectMetadata = ` CREATE TABLE pg_catalog.pg_largeobject_metadata ( oid OID, @@ -1781,7 +1781,7 @@ CREATE TABLE pg_catalog.pg_largeobject_metadata ( lomacl STRING[] )` -//PgCatalogReplicationSlots is an empty table in the pg_catalog that is not implemented yet +// PgCatalogReplicationSlots is an empty table in the pg_catalog that is not implemented yet const PgCatalogReplicationSlots = ` CREATE TABLE pg_catalog.pg_replication_slots ( slot_name NAME, @@ -1800,7 +1800,7 @@ CREATE TABLE pg_catalog.pg_replication_slots ( safe_wal_size INT )` -//PgCatalogSubscriptionRel is an empty table in the pg_catalog that is not implemented yet +// PgCatalogSubscriptionRel is an empty table in the pg_catalog that is not implemented yet const PgCatalogSubscriptionRel = ` CREATE TABLE pg_catalog.pg_subscription_rel ( srsubid OID, @@ -1809,7 +1809,7 @@ CREATE TABLE pg_catalog.pg_subscription_rel ( srsublsn STRING )` -//PgCatalogStatioUserTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioUserTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioUserTables = ` CREATE TABLE pg_catalog.pg_statio_user_tables ( relid OID, @@ -1825,7 +1825,7 @@ CREATE TABLE pg_catalog.pg_statio_user_tables ( tidx_blks_hit INT )` -//PgCatalogTimezoneNames is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTimezoneNames is an empty table in the pg_catalog that is not implemented yet const PgCatalogTimezoneNames = ` CREATE TABLE pg_catalog.pg_timezone_names ( name STRING, @@ -1834,7 +1834,7 @@ CREATE TABLE pg_catalog.pg_timezone_names ( is_dst BOOL )` -//PgCatalogPartitionedTable is an empty table in the pg_catalog that is not implemented yet +// PgCatalogPartitionedTable is an empty table in the pg_catalog that is not implemented yet const PgCatalogPartitionedTable = ` CREATE TABLE pg_catalog.pg_partitioned_table ( partrelid OID, @@ -1847,7 +1847,7 @@ CREATE TABLE pg_catalog.pg_partitioned_table ( partexprs STRING )` -//PgCatalogStatioSysIndexes is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatioSysIndexes is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatioSysIndexes = ` CREATE TABLE pg_catalog.pg_statio_sys_indexes ( relid OID, @@ -1859,14 +1859,14 @@ CREATE TABLE pg_catalog.pg_statio_sys_indexes ( idx_blks_hit INT )` -//PgCatalogConfig is an empty table in the pg_catalog that is not implemented yet +// PgCatalogConfig is an empty table in the pg_catalog that is not implemented yet const PgCatalogConfig = ` CREATE TABLE pg_catalog.pg_config ( name STRING, setting STRING )` -//PgCatalogStatUserTables is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatUserTables is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatUserTables = ` CREATE TABLE pg_catalog.pg_stat_user_tables ( relid OID, @@ -1894,7 +1894,7 @@ CREATE TABLE pg_catalog.pg_stat_user_tables ( autoanalyze_count INT )` -//PgCatalogSubscription is an empty table in the pg_catalog that is not implemented yet +// PgCatalogSubscription is an empty table in the pg_catalog that is not implemented yet const PgCatalogSubscription = ` CREATE TABLE pg_catalog.pg_subscription ( oid OID, @@ -1908,7 +1908,7 @@ CREATE TABLE pg_catalog.pg_subscription ( subpublications STRING[] )` -//PgCatalogTsDict is an empty table in the pg_catalog that is not implemented yet +// PgCatalogTsDict is an empty table in the pg_catalog that is not implemented yet const 
PgCatalogTsDict = ` CREATE TABLE pg_catalog.pg_ts_dict ( oid OID, @@ -1919,7 +1919,7 @@ CREATE TABLE pg_catalog.pg_ts_dict ( dictinitoption STRING )` -//PgCatalogLargeobject is an empty table in the pg_catalog that is not implemented yet +// PgCatalogLargeobject is an empty table in the pg_catalog that is not implemented yet const PgCatalogLargeobject = ` CREATE TABLE pg_catalog.pg_largeobject ( loid OID, @@ -1927,7 +1927,7 @@ CREATE TABLE pg_catalog.pg_largeobject ( data BYTES )` -//PgCatalogReplicationOriginStatus is an empty table in the pg_catalog that is not implemented yet +// PgCatalogReplicationOriginStatus is an empty table in the pg_catalog that is not implemented yet const PgCatalogReplicationOriginStatus = ` CREATE TABLE pg_catalog.pg_replication_origin_status ( local_id OID, @@ -1936,7 +1936,7 @@ CREATE TABLE pg_catalog.pg_replication_origin_status ( local_lsn STRING )` -//PgCatalogShmemAllocations is an empty table in the pg_catalog that is not implemented yet +// PgCatalogShmemAllocations is an empty table in the pg_catalog that is not implemented yet const PgCatalogShmemAllocations = ` CREATE TABLE pg_catalog.pg_shmem_allocations ( name STRING, @@ -1945,7 +1945,7 @@ CREATE TABLE pg_catalog.pg_shmem_allocations ( allocated_size INT )` -//PgCatalogStatWalReceiver is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatWalReceiver is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatWalReceiver = ` CREATE TABLE pg_catalog.pg_stat_wal_receiver ( pid INT4, @@ -1965,7 +1965,7 @@ CREATE TABLE pg_catalog.pg_stat_wal_receiver ( conninfo STRING )` -//PgCatalogStatSubscription is an empty table in the pg_catalog that is not implemented yet +// PgCatalogStatSubscription is an empty table in the pg_catalog that is not implemented yet const PgCatalogStatSubscription = ` CREATE TABLE pg_catalog.pg_stat_subscription ( subid OID, diff --git a/pkg/sql/window.go b/pkg/sql/window.go index 4730f12ac3b5..f3bb2f389476 100644 --- a/pkg/sql/window.go +++ b/pkg/sql/window.go @@ -22,17 +22,18 @@ import ( // // windowRender will contain renders that will output the desired result // columns (so len(windowRender) == len(columns)). -// 1. If ith render from the source node does not have any window functions, -// then that column will be simply passed through and windowRender[i] is -// nil. Notably, windowNode will rearrange renders in the source node so -// that all such passed through columns are contiguous and in the beginning. -// (This happens during extractWindowFunctions call.) -// 2. If ith render from the source node has any window functions, then the -// render is stored in windowRender[i]. During -// constructWindowFunctionsDefinitions all variables used in OVER clauses -// of all window functions are being rendered, and during -// setupWindowFunctions all arguments to all window functions are being -// rendered (renders are reused if possible). +// 1. If ith render from the source node does not have any window functions, +// then that column will be simply passed through and windowRender[i] is +// nil. Notably, windowNode will rearrange renders in the source node so +// that all such passed through columns are contiguous and in the beginning. +// (This happens during extractWindowFunctions call.) +// 2. If ith render from the source node has any window functions, then the +// render is stored in windowRender[i]. 
During +// constructWindowFunctionsDefinitions all variables used in OVER clauses +// of all window functions are being rendered, and during +// setupWindowFunctions all arguments to all window functions are being +// rendered (renders are reused if possible). +// // Therefore, the schema of the source node will be changed to look as follows: // pass through column | OVER clauses columns | arguments to window functions. type windowNode struct { diff --git a/pkg/startupmigrations/doc.go b/pkg/startupmigrations/doc.go index cde09544f68b..4c562daa5960 100644 --- a/pkg/startupmigrations/doc.go +++ b/pkg/startupmigrations/doc.go @@ -16,8 +16,7 @@ // associated with bootstrap state are said to be "baked in" to a certain // version and thus can be deleted in the subsequent major release. // -// -// Differences from migration package +// # Differences from migration package // // This package overlaps in functionality with the migration subsystem. The // major differences are that the "long running" migrations in pkg/migration diff --git a/pkg/storage/batch.go b/pkg/storage/batch.go index 2b6cd5b082c8..9e0b1d86a3c9 100644 --- a/pkg/storage/batch.go +++ b/pkg/storage/batch.go @@ -78,26 +78,29 @@ func decodePebbleBatchHeader(repr []byte) (count int, orepr pebble.BatchReader, // // Example: // r, err := NewPebbleBatchReader(...) -// if err != nil { -// return err -// } -// for r.Next() { -// switch r.BatchType() { -// case BatchTypeDeletion: -// fmt.Printf("delete(%x)", r.Key()) -// case BatchTypeValue: -// fmt.Printf("put(%x,%x)", r.Key(), r.Value()) -// case BatchTypeMerge: -// fmt.Printf("merge(%x,%x)", r.Key(), r.Value()) -// case BatchTypeSingleDeletion: -// fmt.Printf("single_delete(%x)", r.Key()) -// case BatchTypeRangeDeletion: -// fmt.Printf("delete_range(%x,%x)", r.Key(), r.Value()) -// } -// } -// if err := r.Error(); err != nil { -// return err -// } +// +// if err != nil { +// return err +// } +// +// for r.Next() { +// switch r.BatchType() { +// case BatchTypeDeletion: +// fmt.Printf("delete(%x)", r.Key()) +// case BatchTypeValue: +// fmt.Printf("put(%x,%x)", r.Key(), r.Value()) +// case BatchTypeMerge: +// fmt.Printf("merge(%x,%x)", r.Key(), r.Value()) +// case BatchTypeSingleDeletion: +// fmt.Printf("single_delete(%x)", r.Key()) +// case BatchTypeRangeDeletion: +// fmt.Printf("delete_range(%x,%x)", r.Key(), r.Value()) +// } +// } +// +// if err := r.Error(); err != nil { +// return err +// } type PebbleBatchReader struct { batchReader pebble.BatchReader diff --git a/pkg/storage/doc.go b/pkg/storage/doc.go index e7947d5d3358..4987c03cf91d 100644 --- a/pkg/storage/doc.go +++ b/pkg/storage/doc.go @@ -24,7 +24,7 @@ engine. MVCC is the basis for Cockroach's support for distributed transactions. It is intended for direct use from storage.Range objects. -Notes on MVCC architecture +# Notes on MVCC architecture Each MVCC value contains a metadata key/value pair and one or more version key/value pairs. The MVCC metadata key is the actual key for @@ -60,13 +60,13 @@ version. It also allows us to leverage RocksDB's bloom filters. The following is an example of the sort order for MVCC key/value pairs: - ... - keyA: MVCCMetadata of keyA - keyA_Timestamp_n: value of version_n - keyA_Timestamp_n-1: value of version_n-1 - ... - keyA_Timestamp_0: value of version_0 - keyB: MVCCMetadata of keyB + ... + keyA: MVCCMetadata of keyA + keyA_Timestamp_n: value of version_n + keyA_Timestamp_n-1: value of version_n-1 + ... 
+ keyA_Timestamp_0: value of version_0 + keyB: MVCCMetadata of keyB The binary encoding used on the MVCC keys allows arbitrary keys to be stored in the map (no restrictions on intermediate nil-bytes, for @@ -100,7 +100,7 @@ on a Put operation and are cleared from the engine on a delete. Importantly, zero-timestamped MVCC values may be merged, as is necessary for stats counters and time series data. -Versioning +# Versioning CockroachDB uses cluster versions to accommodate backwards-incompatible behavior. Cluster versions are described and documented in diff --git a/pkg/storage/engine.go b/pkg/storage/engine.go index abaf268a099d..4996ef5286b2 100644 --- a/pkg/storage/engine.go +++ b/pkg/storage/engine.go @@ -59,11 +59,11 @@ func init() { // // Consider the following point keys and range keys: // -// 4: a4 b4 -// 3: [-------) -// 2: [-------) -// 1: b1 c1 -// a b c +// 4: a4 b4 +// 3: [-------) +// 2: [-------) +// 1: b1 c1 +// a b c // // Range keys cover a span between two roachpb.Key bounds (start inclusive, end // exclusive) and contain timestamp/value pairs. They overlap *all* point key @@ -109,9 +109,9 @@ func init() { // between two keys form a stack of range key fragments at different timestamps. // For example, writing [a-e)@1 and [c-g)@2 will yield this fragment structure: // -// 2: |---|---| -// 1: |---|---| -// a c e g +// 2: |---|---| +// 1: |---|---| +// a c e g // // Fragmentation makes all range key properties local, which avoids incurring // unnecessary access costs across SSTs and CRDB ranges. It is deterministic @@ -497,12 +497,13 @@ const ( // Reader is the read interface to an engine's data. Certain implementations // of Reader guarantee consistency of the underlying engine state across the // different iterators created by NewMVCCIterator, NewEngineIterator: -// - pebbleSnapshot, because it uses an engine snapshot. -// - pebbleReadOnly, pebbleBatch: when the IterOptions do not specify a -// timestamp hint (see IterOptions). Note that currently the engine state -// visible here is not as of the time of the Reader creation. It is the time -// when the first iterator is created, or earlier if -// PinEngineStateForIterators is called. +// - pebbleSnapshot, because it uses an engine snapshot. +// - pebbleReadOnly, pebbleBatch: when the IterOptions do not specify a +// timestamp hint (see IterOptions). Note that currently the engine state +// visible here is not as of the time of the Reader creation. It is the time +// when the first iterator is created, or earlier if +// PinEngineStateForIterators is called. +// // The ConsistentIterators method returns true when this consistency is // guaranteed by the Reader. // TODO(sumeer): this partial consistency can be a source of bugs if future diff --git a/pkg/storage/intent_interleaving_iter.go b/pkg/storage/intent_interleaving_iter.go index 6bdf59db6380..8723258ab556 100644 --- a/pkg/storage/intent_interleaving_iter.go +++ b/pkg/storage/intent_interleaving_iter.go @@ -37,25 +37,27 @@ const ( // intentInterleavingIter makes separated intents appear as interleaved. It // relies on the following assumptions: -// - There can also be intents that are physically interleaved. -// However, for a particular roachpb.Key there will be at most one intent, -// either interleaved or separated. -// - An intent will have a corresponding provisional value. -// - The only single key locks in the lock table key space are intents. +// - There can also be intents that are physically interleaved. 
+// However, for a particular roachpb.Key there will be at most one intent, +// either interleaved or separated. +// - An intent will have a corresponding provisional value. +// - The only single key locks in the lock table key space are intents. // // Semantically, the functionality is equivalent to merging two MVCCIterators: -// - A MVCCIterator on the MVCC key space. -// - A MVCCIterator constructed by wrapping an EngineIterator on the lock table -// key space where the EngineKey is transformed into the corresponding -// intent key and appears as MVCCKey{Key: intentKey}. +// - A MVCCIterator on the MVCC key space. +// - A MVCCIterator constructed by wrapping an EngineIterator on the lock table +// key space where the EngineKey is transformed into the corresponding +// intent key and appears as MVCCKey{Key: intentKey}. +// // The implementation below is specialized to reduce unnecessary comparisons // and iteration, by utilizing the aforementioned assumptions. The intentIter // iterates over the lock table key space and iter over the MVCC key space. // They are kept synchronized in the following way (for forward iteration): -// - At the same MVCCKey.Key: the intentIter is at the intent and iter at the -// provisional value. -// - At different MVCCKey.Keys: the intentIter is ahead of iter, at the first -// key after iter's MVCCKey.Key that has a separated intent. +// - At the same MVCCKey.Key: the intentIter is at the intent and iter at the +// provisional value. +// - At different MVCCKey.Keys: the intentIter is ahead of iter, at the first +// key after iter's MVCCKey.Key that has a separated intent. +// // Note that in both cases the iterators are apart by the minimal possible // distance. This minimal distance rule applies for reverse iteration too, and // can be used to construct similar invariants. diff --git a/pkg/storage/intent_interleaving_iter_test.go b/pkg/storage/intent_interleaving_iter_test.go index 877a3260b923..0929d58010a2 100644 --- a/pkg/storage/intent_interleaving_iter_test.go +++ b/pkg/storage/intent_interleaving_iter_test.go @@ -183,25 +183,25 @@ func checkAndOutputIter(iter MVCCIterator, b *strings.Builder) { } // TestIntentInterleavingIter is a datadriven test consisting of two commands: -// - define: defines key-value pairs in the lock table and MVCC key spaces. -// Intents can be in both key spaces, and inline meta and MVCC values in -// the latter. -// meta k= ts= txn= defines an intent -// meta k= defines an inline meta -// value k= ts= v= defines an MVCC value -// It is acceptable to define intents without provisional values to test -// out error checking code paths. -// - iter: for iterating, is defined as -// iter [lower=] [upper=] [prefix=] -// followed by newline separated sequence of operations: +// - define: defines key-value pairs in the lock table and MVCC key spaces. +// Intents can be in both key spaces, and inline meta and MVCC values in +// the latter. +// meta k= ts= txn= defines an intent +// meta k= defines an inline meta +// value k= ts= v= defines an MVCC value +// It is acceptable to define intents without provisional values to test +// out error checking code paths. +// - iter: for iterating, is defined as +// iter [lower=] [upper=] [prefix=] +// followed by newline separated sequence of operations: // next, prev, seek-lt, seek-ge, next-key, stats // // Keys are interpreted as: -// - starting with L is interpreted as a local-range key. -// - starting with S is interpreted as a store local key. 
-// - starting with Y is interpreted as a local key starting immediately after -// the lock table key space. This is for testing edge cases wrt bounds. -// - a single Z is interpreted as LocalMax +// - starting with L is interpreted as a local-range key. +// - starting with S is interpreted as a store local key. +// - starting with Y is interpreted as a local key starting immediately after +// the lock table key space. This is for testing edge cases wrt bounds. +// - a single Z is interpreted as LocalMax // // Note: This test still manually writes interleaved intents. Even though // we've removed codepaths to write interleaved intents, intentInterleavingIter diff --git a/pkg/storage/metamorphic/meta_test.go b/pkg/storage/metamorphic/meta_test.go index 063896b4e37d..fd89ec2444a4 100644 --- a/pkg/storage/metamorphic/meta_test.go +++ b/pkg/storage/metamorphic/meta_test.go @@ -327,10 +327,10 @@ func TestPebbleCheck(t *testing.T) { // output.meta to `--check` and the diverging run's output.meta to // `--compare-files`: // -// ./dev test -v ./pkg/storage/metamorphic -f TestCompareFiles --ignore-cache \ -// --test-args '--in-mem' \ -// --test-args '--check=/Users/craig/archive/output.meta' \ -// --test-args '--compare-files=/Users/craig/archive/random8.meta' +// ./dev test -v ./pkg/storage/metamorphic -f TestCompareFiles --ignore-cache \ +// --test-args '--in-mem' \ +// --test-args '--check=/Users/craig/archive/output.meta' \ +// --test-args '--compare-files=/Users/craig/archive/random8.meta' // // The above example supplies `--in-mem`. This may be useful to produce quick // reproductions, but if you want to dig through the data directory, omit it. diff --git a/pkg/storage/metamorphic/operations.go b/pkg/storage/metamorphic/operations.go index 6d56e5c2c7e4..0105dd19ee2e 100644 --- a/pkg/storage/metamorphic/operations.go +++ b/pkg/storage/metamorphic/operations.go @@ -803,10 +803,10 @@ func (r restartOp) run(ctx context.Context) string { // List of operation generators, where each operation is defined as one instance of opGenerator. // // TODO(itsbilal): Add more missing MVCC operations, such as: -// - MVCCBlindPut -// - MVCCMerge -// - MVCCIncrement -// - and any others that would be important to test. +// - MVCCBlindPut +// - MVCCMerge +// - MVCCIncrement +// - and any others that would be important to test. var opGenerators = []opGenerator{ { name: "mvcc_inconsistent_get", diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go index 5167b764b2fc..4f3df0db77df 100644 --- a/pkg/storage/mvcc.go +++ b/pkg/storage/mvcc.go @@ -1087,17 +1087,17 @@ func MVCCGetAsTxn( // A prefix scan using the iterator is performed, resulting in one of the // following successful outcomes: // -// 1) iterator finds nothing; returns (false, 0, 0, nil). -// 2) iterator finds an explicit meta key; unmarshals and returns its size. -// ok is set to true. -// 3) iterator finds a value, i.e. the meta key is implicit. -// In this case, it accounts for the size of the key with the portion -// of the user key found which is not the MVCC timestamp suffix (since -// that is the usual contribution of the meta key). The value size returned -// will be zero, as there is no stored MVCCMetadata. -// ok is set to true. -// 4) iterator finds an MVCC range tombstone above a value. In this case, -// metadata for a synthetic point tombstone is returned. +// 1. iterator finds nothing; returns (false, 0, 0, nil). +// 2. iterator finds an explicit meta key; unmarshals and returns its size. +// ok is set to true. +// 3. 
iterator finds a value, i.e. the meta key is implicit. +// In this case, it accounts for the size of the key with the portion +// of the user key found which is not the MVCC timestamp suffix (since +// that is the usual contribution of the meta key). The value size returned +// will be zero, as there is no stored MVCCMetadata. +// ok is set to true. +// 4. iterator finds an MVCC range tombstone above a value. In this case, +// metadata for a synthetic point tombstone is returned. // // The timestamp where the real point key last changed is also returned, if a // real point key was found. This may differ from the metadata timestamp when a @@ -2878,7 +2878,8 @@ func MVCCDeleteRange( // t3 // t2 a2 b2 d2 e2 // t1 b1 c1 -// a b c d e +// +// a b c d e func MVCCPredicateDeleteRange( ctx context.Context, rw ReadWriter, @@ -4082,12 +4083,12 @@ func mvccGetIntent( // Note that a transaction can "partially abort" and still commit due to nested // SAVEPOINTs, such as in the below example: // -// BEGIN; -// SAVEPOINT foo; -// INSERT INTO kv VALUES(1, 1); -// ROLLBACK TO SAVEPOINT foo; -// INSERT INTO kv VALUES(1, 2); -// COMMIT; +// BEGIN; +// SAVEPOINT foo; +// INSERT INTO kv VALUES(1, 1); +// ROLLBACK TO SAVEPOINT foo; +// INSERT INTO kv VALUES(1, 2); +// COMMIT; // // This would first remove the intent (1,1) during the ROLLBACK using a Del (the // anomaly below would occur the same if a SingleDel were used here), and thus @@ -4103,12 +4104,12 @@ func mvccGetIntent( // However, this sequence could compact as follows (at the time of writing, bound // to change with #69891): // -// - Set (Del Set') SingleDel -// ↓ -// - Set Set' SingleDel -// - Set (Set' SingleDel) -// ↓ -// - Set +// - Set (Del Set') SingleDel +// ↓ +// - Set Set' SingleDel +// - Set (Set' SingleDel) +// ↓ +// - Set // // which means that a previously deleted intent metadata would erroneously // become visible again. So on top of restricting SingleDel to the COMMIT case, @@ -6154,7 +6155,9 @@ type MVCCIsSpanEmptyOptions struct { // with the (unsafe) range key stack: // // -1: range key to the left not touching the peek key, or no range key found. -// 0: range key to the left ends at the peek key. +// +// 0: range key to the left ends at the peek key. +// // +1: range key to the left overlaps with the peek key, extending to the right. func PeekRangeKeysLeft(iter MVCCIterator, peekKey roachpb.Key) (int, MVCCRangeKeyStack, error) { iter.SeekLT(MVCCKey{Key: peekKey}) @@ -6174,7 +6177,9 @@ func PeekRangeKeysLeft(iter MVCCIterator, peekKey roachpb.Key) (int, MVCCRangeKe // with the (unsafe) range key stack: // // -1: range key to the right overlaps with the peek key, existing to the left. -// 0: range key to the right starts at the peek key. +// +// 0: range key to the right starts at the peek key. +// // +1: range key to the right not touching the peek key, or no range key found. 
func PeekRangeKeysRight(iter MVCCIterator, peekKey roachpb.Key) (int, MVCCRangeKeyStack, error) { iter.SeekGE(MVCCKey{Key: peekKey}) diff --git a/pkg/storage/mvcc_history_test.go b/pkg/storage/mvcc_history_test.go index a7127f66349e..f9cb6cfdd008 100644 --- a/pkg/storage/mvcc_history_test.go +++ b/pkg/storage/mvcc_history_test.go @@ -134,16 +134,18 @@ var ( // // Additionally, the pseudo-command `with` enables sharing // a group of arguments between multiple commands, for example: -// with t=A -// txn_begin -// with k=a -// put v=b -// resolve_intent +// +// with t=A +// txn_begin +// with k=a +// put v=b +// resolve_intent +// // Really means: -// txn_begin t=A -// put v=b k=a t=A -// resolve_intent k=a t=A // +// txn_begin t=A +// put v=b k=a t=A +// resolve_intent k=a t=A func TestMVCCHistories(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/storage/mvcc_incremental_iterator.go b/pkg/storage/mvcc_incremental_iterator.go index 0976261edbfc..849d9707cc54 100644 --- a/pkg/storage/mvcc_incremental_iterator.go +++ b/pkg/storage/mvcc_incremental_iterator.go @@ -45,17 +45,18 @@ var mvccIncrementalIteratorMetamorphicTBI = util.ConstantWithMetamorphicTestBool // CockroachDB uses that as a sentinel for key metadata anyway. // // Expected usage: -// iter := NewMVCCIncrementalIterator(e, IterOptions{ -// StartTime: startTime, -// EndTime: endTime, -// UpperBound: endKey, -// }) -// defer iter.Close() -// for iter.SeekGE(startKey); ; iter.Next() { -// ok, err := iter.Valid() -// if !ok { ... } -// [code using iter.Key() and iter.Value()] -// } +// +// iter := NewMVCCIncrementalIterator(e, IterOptions{ +// StartTime: startTime, +// EndTime: endTime, +// UpperBound: endKey, +// }) +// defer iter.Close() +// for iter.SeekGE(startKey); ; iter.Next() { +// ok, err := iter.Valid() +// if !ok { ... } +// [code using iter.Key() and iter.Value()] +// } // // Note regarding the correctness of the time-bound iterator optimization: // @@ -703,8 +704,8 @@ func (i *MVCCIncrementalIterator) updateIgnoreTime() { // // * HasPointAndRange() will return false,false if on a bare range key. // -// * RangeKeyChanged() will not fire, unless stepping off of a range key -// within the time bounds. +// - RangeKeyChanged() will not fire, unless stepping off of a range key +// within the time bounds. // // * RangeBounds() and RangeKeys() will return empty results. func (i *MVCCIncrementalIterator) NextIgnoringTime() { diff --git a/pkg/storage/mvcc_stats_test.go b/pkg/storage/mvcc_stats_test.go index 57b2e945302c..fafb064e831c 100644 --- a/pkg/storage/mvcc_stats_test.go +++ b/pkg/storage/mvcc_stats_test.go @@ -1041,14 +1041,15 @@ func TestMVCCStatsDelDelGC(t *testing.T) { // because when computing the stats updates, there was an implicit assumption // that the meta entries would always move forward in time. // UPDATE: since there should be no way for a txn to write older intents, -// mvccPutInternal now makes sure that writes are always done at the most -// recent intent timestamp within the same txn. Note that this case occurs -// when the txn timestamp is moved forward due to a write too old condition, -// which writes the first intent at a higher timestamp. We don't allow the -// second intent to then be written at a lower timestamp, because that breaks -// the contract that the intent is always the newest version. -// This test now merely verifies that even when we try to write an older -// version, we're upgraded to write the MVCCMetadata.Timestamp. 
+// +// mvccPutInternal now makes sure that writes are always done at the most +// recent intent timestamp within the same txn. Note that this case occurs +// when the txn timestamp is moved forward due to a write too old condition, +// which writes the first intent at a higher timestamp. We don't allow the +// second intent to then be written at a lower timestamp, because that breaks +// the contract that the intent is always the newest version. +// This test now merely verifies that even when we try to write an older +// version, we're upgraded to write the MVCCMetadata.Timestamp. func TestMVCCStatsPutIntentTimestampNotPutTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/storage/mvcc_test.go b/pkg/storage/mvcc_test.go index ca6e9a17bec4..c27481c6e8da 100644 --- a/pkg/storage/mvcc_test.go +++ b/pkg/storage/mvcc_test.go @@ -2664,7 +2664,9 @@ func TestMVCCReverseScanSeeksOverRepeatedKeys(t *testing.T) { // The bug happened in this scenario. // (1) reverse scan is positioned at the range's smallest key and calls `prevKey()` // (2) `prevKey()` peeks and sees newer versions of the same logical key -// `iters_before_seek_-1` times, moving the iterator backwards each time +// +// `iters_before_seek_-1` times, moving the iterator backwards each time +// // (3) on the `iters_before_seek_`th peek, there are no previous keys found // // Then, the problem was `prevKey()` treated finding no previous key as if it had found a diff --git a/pkg/storage/mvcc_value.go b/pkg/storage/mvcc_value.go index a80691d2c291..82a847a40a0f 100644 --- a/pkg/storage/mvcc_value.go +++ b/pkg/storage/mvcc_value.go @@ -47,11 +47,11 @@ const ( // // Simple (identical to the roachpb.Value encoding): // -// <4-byte-checksum><1-byte-tag> +// <4-byte-checksum><1-byte-tag> // // Extended (header prepended to roachpb.Value encoding): // -// <4-byte-header-len><1-byte-sentinel><4-byte-checksum><1-byte-tag> +// <4-byte-header-len><1-byte-sentinel><4-byte-checksum><1-byte-tag> // // The two encoding scheme variants are distinguished using the 5th byte, which // is either the roachpb.Value tag (which has many values) or a sentinel tag not @@ -62,17 +62,16 @@ const ( // be empty, i.e., no checksum, tag, or encoded-data. In that case the extended // encoding above is simply: // -// <4-byte-header-len><1-byte-sentinel> +// <4-byte-header-len><1-byte-sentinel> // // To identify a deletion tombstone from an encoded MVCCValue, callers should // decode the value using DecodeMVCCValue and then use the IsTombstone method. // For example: // -// valRaw := iter.UnsafeValue() -// val, err := DecodeMVCCValue(valRaw) -// if err != nil { ... } -// isTombstone := val.IsTombstone() -// +// valRaw := iter.UnsafeValue() +// val, err := DecodeMVCCValue(valRaw) +// if err != nil { ... } +// isTombstone := val.IsTombstone() type MVCCValue struct { enginepb.MVCCValueHeader Value roachpb.Value @@ -179,6 +178,7 @@ var emptyValueHeader = func() enginepb.MVCCValueHeader { }() // encodedMVCCValueSize returns the size of the MVCCValue when encoded. +// //gcassert:inline func encodedMVCCValueSize(v MVCCValue) int { if v.MVCCValueHeader == emptyValueHeader { @@ -189,6 +189,7 @@ func encodedMVCCValueSize(v MVCCValue) int { // EncodeMVCCValue encodes an MVCCValue into its Pebble representation. See the // comment on MVCCValue for a description of the encoding scheme. 
+// //gcassert:inline func EncodeMVCCValue(v MVCCValue) ([]byte, error) { if v.MVCCValueHeader == emptyValueHeader { @@ -251,6 +252,7 @@ var errMVCCValueMissingHeader = errors.Errorf("invalid encoded mvcc value, missi // simple encoding. If successful, returns the decoded value and true. If the // value was using the extended encoding, returns false, in which case the // caller should call decodeExtendedMVCCValue. +// //gcassert:inline func tryDecodeSimpleMVCCValue(buf []byte) (MVCCValue, bool, error) { if len(buf) == 0 { diff --git a/pkg/storage/point_synthesizing_iter.go b/pkg/storage/point_synthesizing_iter.go index e1adf7bafc9c..868bb196a2a0 100644 --- a/pkg/storage/point_synthesizing_iter.go +++ b/pkg/storage/point_synthesizing_iter.go @@ -45,13 +45,13 @@ var pointSynthesizingIterPool = sync.Pool{ // The relative positioning of pointSynthesizingIter and the underlying iterator // is as follows in the forward direction: // -// - atPoint=true: rangeKeysIdx points to a range key following the point key, -// or beyond rangeKeysEnd when there are no further range keys at this -// key position. +// - atPoint=true: rangeKeysIdx points to a range key following the point key, +// or beyond rangeKeysEnd when there are no further range keys at this +// key position. // -// - atPoint=false: the underlying iterator is on a following key or exhausted. -// This can either be a different version of the current key or a different -// point/range key. +// - atPoint=false: the underlying iterator is on a following key or exhausted. +// This can either be a different version of the current key or a different +// point/range key. // // This positioning is mirrored in the reverse direction. For example, when // atPoint=true and rangeKeys are exhausted, rangeKeysIdx will be rangeKeysEnd diff --git a/pkg/testutils/docker/BUILD.bazel b/pkg/testutils/docker/BUILD.bazel index ec23c56c9623..43755bec58a7 100644 --- a/pkg/testutils/docker/BUILD.bazel +++ b/pkg/testutils/docker/BUILD.bazel @@ -5,11 +5,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_test( name = "docker_test", srcs = ["single_node_docker_test.go"], + args = ["-test.timeout=295s"], data = glob(["testdata/**"]) + [ "//pkg/testutils/docker:testdata", "//pkg/testutils/docker/docker-fsnotify:docker-fsnotify", ], gotags = ["docker"], + tags = ["integration"], deps = [ "//pkg/util/contextutil", "//pkg/util/log", diff --git a/pkg/testutils/docker/single_node_docker_test.go b/pkg/testutils/docker/single_node_docker_test.go index 1bf212259a86..c76bbd4832d2 100644 --- a/pkg/testutils/docker/single_node_docker_test.go +++ b/pkg/testutils/docker/single_node_docker_test.go @@ -548,7 +548,7 @@ func (dn *dockerNode) execSQLQuery( return res, nil } -//rmContainer performs a forced deletion of the current container. +// rmContainer performs a forced deletion of the current container. 
func (dn *dockerNode) rmContainer(ctx context.Context) error { if err := dn.cl.ContainerRemove(ctx, dn.contID, types.ContainerRemoveOptions{ Force: true, diff --git a/pkg/testutils/files.go b/pkg/testutils/files.go index 252175e6b837..adc08d8cf42a 100644 --- a/pkg/testutils/files.go +++ b/pkg/testutils/files.go @@ -13,7 +13,6 @@ package testutils import ( "bufio" "io" - "io/ioutil" "os" "path/filepath" ) @@ -30,7 +29,7 @@ func ReadAllFiles(pattern string) { if err != nil { continue } - _, _ = io.Copy(ioutil.Discard, bufio.NewReader(f)) + _, _ = io.Copy(io.Discard, bufio.NewReader(f)) f.Close() } } diff --git a/pkg/testutils/floatcmp/floatcmp.go b/pkg/testutils/floatcmp/floatcmp.go index 3b50c4615dad..75f6d1f2649e 100644 --- a/pkg/testutils/floatcmp/floatcmp.go +++ b/pkg/testutils/floatcmp/floatcmp.go @@ -52,15 +52,15 @@ const ( // fraction is used to calculate the tolerance as a relative fraction of the // smaller of expected and actual: // -// tolerance_frac = (fraction * min(|expected|, |actual|)) +// tolerance_frac = (fraction * min(|expected|, |actual|)) // // margin specifies the tolerance as an absolute value: // -// tolerance_marg = margin +// tolerance_marg = margin // // The tolerance used to determine approximate equality is: // -// tolerance = max(tolerance_frac, tolerance_marg) +// tolerance = max(tolerance_frac, tolerance_marg) // // To use only one of fraction or margin, set the other to 0. // @@ -68,7 +68,7 @@ const ( // should be set to the smallest relative fraction to tolerate. The margin // should be set to a much smaller value so that it is only used when: // -// (fraction * min(|expected|, |actual|)) < margin +// (fraction * min(|expected|, |actual|)) < margin // // which allows expected and actual to be approximately equal within margin when // either is 0. diff --git a/pkg/testutils/lint/BUILD.bazel b/pkg/testutils/lint/BUILD.bazel index 466c87f00e26..c1839e659efe 100644 --- a/pkg/testutils/lint/BUILD.bazel +++ b/pkg/testutils/lint/BUILD.bazel @@ -20,6 +20,7 @@ go_test( "lint_test.go", "nightly_lint_test.go", ], + args = ["-test.timeout=295s"], data = glob(["testdata/**"]), embed = [":lint"], gotags = ["lint"], diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index 6f29b5d24d5f..f13838028284 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -1772,6 +1772,13 @@ func TestLint(t *testing.T) { // goschedstats contains partial copies of go runtime structures, with // many fields that we're not using. stream.GrepNot(`pkg/util/goschedstats/runtime.*\.go:.*is unused`), + // Ignore ioutil.ReadDir uses that I couldn't get rid of easily. 
+ stream.GrepNot(`pkg/roachprod/.*\.go:.*"io/ioutil" has been deprecated since Go 1\.16: As of Go 1\.16`), + stream.GrepNot(`pkg/security/securityassets/security_assets\.go.*"io/ioutil" has been deprecated since Go 1\.16`), + stream.GrepNot(`pkg/security/securitytest/embedded\.go:.*"io/ioutil" has been deprecated since Go 1\.16`), + stream.GrepNot(`pkg/server/dumpstore.*\.go.*"io/ioutil" has been deprecated since Go 1\.16: As of Go 1\.16`), + stream.GrepNot(`pkg/server/heapprofiler/profilestore_test\.go.*"io/ioutil" has been deprecated since Go 1\.16`), + stream.GrepNot(`pkg/util/log/file_api\.go.*"io/ioutil" has been deprecated since Go 1\.16`), ), func(s string) { t.Errorf("\n%s", s) }); err != nil { @@ -2016,6 +2023,7 @@ func TestLint(t *testing.T) { }) t.Run("TestGCAssert", func(t *testing.T) { + skip.WithIssue(t, 86714) skip.UnderShort(t) skip.UnderBazelWithIssue(t, 65485, "Doesn't work in Bazel -- not really sure why yet") diff --git a/pkg/testutils/lint/passes/hash/hash.go b/pkg/testutils/lint/passes/hash/hash.go index e5309ebea311..c7c3c8a8e569 100644 --- a/pkg/testutils/lint/passes/hash/hash.go +++ b/pkg/testutils/lint/passes/hash/hash.go @@ -34,7 +34,7 @@ var Analyzer = &analysis.Analyzer{ // mistake is to assume that the Sum function returns the hash of its input, // like so: // -// hashedBytes := sha256.New().Sum(inputBytes) +// hashedBytes := sha256.New().Sum(inputBytes) // // In fact, the parameter to Sum is not the bytes to be hashed, but a slice that // will be used as output in case the caller wants to avoid an allocation. In @@ -43,14 +43,14 @@ var Analyzer = &analysis.Analyzer{ // // Correct uses of the hash.Hash interface are as follows: // -// h := sha256.New() -// h.Write(inputBytes) -// hashedBytes := h.Sum(nil) +// h := sha256.New() +// h.Write(inputBytes) +// hashedBytes := h.Sum(nil) // -// h := sha256.New() -// h.Write(inputBytes) -// var hashedBytes [sha256.Size]byte -// h.Sum(hashedBytes[:0]) +// h := sha256.New() +// h.Write(inputBytes) +// var hashedBytes [sha256.Size]byte +// h.Sum(hashedBytes[:0]) // // To differentiate between correct and incorrect usages, hashChecker applies a // simple heuristic: it flags calls to Sum where a) the parameter is non-nil and diff --git a/pkg/testutils/lint/passes/loopvarcapture/loopvarcapture.go b/pkg/testutils/lint/passes/loopvarcapture/loopvarcapture.go index 402c8c3fcbf0..4200abce9848 100644 --- a/pkg/testutils/lint/passes/loopvarcapture/loopvarcapture.go +++ b/pkg/testutils/lint/passes/loopvarcapture/loopvarcapture.go @@ -159,20 +159,22 @@ func (v *Visitor) FindCaptures() []analysis.Diagnostic { // reported by this linter: // // 1: -// for k, v := range myMap { -// // same for `defer`, errgroup.Group.Go(), etc -// go func() { -// fmt.Printf("k = %v, v = %v\n", k, v) -// }() -// } +// +// for k, v := range myMap { +// // same for `defer`, errgroup.Group.Go(), etc +// go func() { +// fmt.Printf("k = %v, v = %v\n", k, v) +// }() +// } // // 2: -// for k, v := range myMap { -// // same for `defer`, errgroup.Group.Go(), etc -// go doWork(func() { -// doMoreWork(k, v) -// }) -// } +// +// for k, v := range myMap { +// // same for `defer`, errgroup.Group.Go(), etc +// go doWork(func() { +// doMoreWork(k, v) +// }) +// } // // If a `go` routine (or `defer`) calls a previously-defined closure // that captures a loop variable, that is also reported. 
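For context on the hunk above: a minimal, self-contained sketch (not part of this diff) of the capture bug the loopvarcapture pass flags and its usual fix, under pre-Go-1.22 loop-variable semantics; the map contents and function here are made up for illustration.

package main

import (
	"fmt"
	"sync"
)

func main() {
	myMap := map[string]int{"a": 1, "b": 2, "c": 3}

	var wg sync.WaitGroup
	for k, v := range myMap {
		// The linter would flag a closure that uses k and v directly, e.g.
		//   go func() { fmt.Printf("k = %v, v = %v\n", k, v) }()
		// (same for `defer` and errgroup.Group.Go), because every goroutine
		// shares the single loop variable. The usual fix is to rebind the
		// variables so each iteration gets its own copies:
		k, v := k, v
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Printf("k = %v, v = %v\n", k, v)
		}()
	}
	wg.Wait()
}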
diff --git a/pkg/testutils/lint/passes/nocopy/nocopy.go b/pkg/testutils/lint/passes/nocopy/nocopy.go index eb1ff772bac5..7206d966644c 100644 --- a/pkg/testutils/lint/passes/nocopy/nocopy.go +++ b/pkg/testutils/lint/passes/nocopy/nocopy.go @@ -36,10 +36,10 @@ const noCopyType = "github.com/cockroachdb/cockroach/pkg/util.NoCopy" // ensures that the type is always embedded without a name as the first field in // a parent struct like: // -// type s struct { -// _ util.NoCopy -// ... -// } +// type s struct { +// _ util.NoCopy +// ... +// } // // We lint against including the type in other positions in structs both for // uniformity and because it can have runtime performance effects. Specifically, diff --git a/pkg/testutils/lint/passes/passesutil/passes_util.go b/pkg/testutils/lint/passes/passesutil/passes_util.go index e652fa6059ae..5b2a54d2753f 100644 --- a/pkg/testutils/lint/passes/passesutil/passes_util.go +++ b/pkg/testutils/lint/passes/passesutil/passes_util.go @@ -79,16 +79,15 @@ func HasNolintComment(pass *analysis.Pass, n ast.Node, nolintName string) bool { // For example, imagine that n is the *ast.CallExpr on the method foo in the // following snippet: // -// func nonsense() bool { -// if v := (g.foo() + 1) > 2; !v { -// return true -// } -// return false -// } +// func nonsense() bool { +// if v := (g.foo() + 1) > 2; !v { +// return true +// } +// return false +// } // // This function would return all of the nodes up to the `IfStmt` as relevant // and would return the `BlockStmt` of the function nonsense as containing. -// func findNodesInBlock(f *ast.File, n ast.Node) (relevant []ast.Node, containing ast.Node) { stack, _ := astutil.PathEnclosingInterval(f, n.Pos(), n.End()) // Add all of the children of n to the set of relevant nodes. diff --git a/pkg/testutils/lint/passes/timer/timer.go b/pkg/testutils/lint/passes/timer/timer.go index b924368190e8..f1513a1112ad 100644 --- a/pkg/testutils/lint/passes/timer/timer.go +++ b/pkg/testutils/lint/passes/timer/timer.go @@ -43,15 +43,14 @@ var Analyzer = &analysis.Analyzer{ // statement. The timers are usually used as timeouts on these select // statements, and need to be reset after each iteration. // -// for { -// timer.Reset(...) -// select { -// case <-timer.C: -// timer.Read = true <-- lint verifies that this line is present -// case ...: -// } -// } -// +// for { +// timer.Reset(...) +// select { +// case <-timer.C: +// timer.Read = true <-- lint verifies that this line is present +// case ...: +// } +// } func run(pass *analysis.Pass) (interface{}, error) { selectorIsTimer := func(s *ast.SelectorExpr) bool { tv, ok := pass.TypesInfo.Types[s.X] diff --git a/pkg/testutils/localtestcluster/local_test_cluster.go b/pkg/testutils/localtestcluster/local_test_cluster.go index abdb29b26b08..3a64df49b7e0 100644 --- a/pkg/testutils/localtestcluster/local_test_cluster.go +++ b/pkg/testutils/localtestcluster/local_test_cluster.go @@ -48,10 +48,10 @@ import ( // cockroach node with a single store using a local sender. Example // usage of a LocalTestCluster follows: // -// s := &LocalTestCluster{} -// s.Start(t, testutils.NewNodeTestBaseContext(), -// kv.InitFactoryForLocalTestCluster) -// defer s.Stop() +// s := &LocalTestCluster{} +// s.Start(t, testutils.NewNodeTestBaseContext(), +// kv.InitFactoryForLocalTestCluster) +// defer s.Stop() // // Note that the LocalTestCluster is different from server.TestCluster // in that although it uses a distributed sender, there is no RPC traffic. 
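Similarly, for the timer pass reformatted a few files above: a rough sketch (not part of this diff) of the loop shape that linter expects, assuming the timeutil.Timer API (Reset, C, Read, Stop) its comment refers to; pollLoop, done, and interval are hypothetical names.

package example

import (
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)

// pollLoop is a hypothetical worker loop written the way the timer pass
// expects: Reset at the top of each iteration, and Read set to true when the
// timer channel fires.
func pollLoop(done <-chan struct{}, interval time.Duration) {
	var timer timeutil.Timer
	defer timer.Stop()
	for {
		timer.Reset(interval)
		select {
		case <-timer.C:
			timer.Read = true // the linter checks that this line is present
			// ... periodic work goes here ...
		case <-done:
			return
		}
	}
}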
diff --git a/pkg/testutils/pprof.go b/pkg/testutils/pprof.go index b2dc4586e5fc..36201f5e81cd 100644 --- a/pkg/testutils/pprof.go +++ b/pkg/testutils/pprof.go @@ -36,13 +36,15 @@ func WriteProfile(t testing.TB, name string, path string) { // -memprofile does not. // // Example usage: -// setupCode() -// AllocProfileDiff(t, "mem.before", "mem.after", func() { -// interestingCode() -// }) +// +// setupCode() +// AllocProfileDiff(t, "mem.before", "mem.after", func() { +// interestingCode() +// }) // // The resulting profiles are then diffed via: -// go tool pprof -base mem.before mem.after +// +// go tool pprof -base mem.before mem.after func AllocProfileDiff(t testing.TB, beforePath, afterPath string, fn func()) { // Use "allocs" instead of "heap" to match what -memprofile does. Also run // runtime.GC immediately before grabbing the profile because the allocs diff --git a/pkg/testutils/sqlutils/pg_url.go b/pkg/testutils/sqlutils/pg_url.go index ca899a6c9760..a5c319173752 100644 --- a/pkg/testutils/sqlutils/pg_url.go +++ b/pkg/testutils/sqlutils/pg_url.go @@ -39,7 +39,8 @@ func PGUrl(t testing.TB, servingAddr, prefix string, user *url.Userinfo) (url.UR // copies of the certificates, so the cleanup function must always be called. // // Args: -// prefix: A prefix to be prepended to the temp file names generated, for debugging. +// +// prefix: A prefix to be prepended to the temp file names generated, for debugging. func PGUrlE(servingAddr, prefix string, user *url.Userinfo) (url.URL, func(), error) { return PGUrlWithOptionalClientCertsE(servingAddr, prefix, user, true /* withCerts */) } diff --git a/pkg/testutils/sqlutils/table_gen.go b/pkg/testutils/sqlutils/table_gen.go index 7fe710536cb2..4f3cd67e5cff 100644 --- a/pkg/testutils/sqlutils/table_gen.go +++ b/pkg/testutils/sqlutils/table_gen.go @@ -110,7 +110,8 @@ func RowModuloFn(modulo int) GenValueFn { // IntToEnglish returns an English (pilot style) string for the given integer, // for example: -// IntToEnglish(135) = "one-three-five" +// +// IntToEnglish(135) = "one-three-five" func IntToEnglish(val int) string { if val < 0 { panic(val) diff --git a/pkg/ts/doc.go b/pkg/ts/doc.go index 5b8dd3592bb7..23f3ccb4ca0e 100644 --- a/pkg/ts/doc.go +++ b/pkg/ts/doc.go @@ -27,8 +27,7 @@ good step towards that goal. This package provides a specialized time series database, relatively narrow in scope, that can store this data with good performance characteristics. - -Organization Structure +# Organization Structure Time series data is organized on disk according to two basic, sortable properties: + Time series name (i.e "sql.operations.selects") @@ -38,8 +37,7 @@ This is optimized for querying data for a single series over multiple timestamps: data for the same series at different timestamps is stored contiguously. - -Downsampling +# Downsampling The amount of data produced by time series sampling can be considerable; storing every incoming data point with perfect fidelity can command a tremendous amount @@ -63,8 +61,7 @@ server side before returning the data. One restriction is that a query cannot request a downsampling period which is shorter than the smallest on-disk resolution (e.g. one data point per second). - -Slab Storage +# Slab Storage In order to use key space efficiently, we pack data for multiple contiguous samples into "slab" values, with data for each slab stored in a CockroachDB key. @@ -75,8 +72,7 @@ a "slab duration" of 1 hour, meaning that all samples that fall in the same hour are stored at the same key. 
This strategy helps reduce the number of keys scanned during a query. - -Source Keys +# Source Keys Another common use case of time series queries is the aggregation of multiple series; for example, you may want to query the same metric (e.g. "queries per @@ -93,8 +89,7 @@ this means that data that is from the same series and time period, but from different nodes, will be stored contiguously in the key space. Data from all sources in a series can thus be queried in a single scan. - -Multiple resolutions +# Multiple resolutions In order to save space on disk, the database stores older data for time series at lower resolution, more commonly known as a "rollup". @@ -110,8 +105,7 @@ count and variance of the original 10 second points used to create the 30 minute point; this means that any downsampler that could have been used on the original data is still accessible in the rolled-up data. - -Example +# Example A hypothetical example from CockroachDB: we want to record the available capacity of all stores in the cluster. diff --git a/pkg/ts/server.go b/pkg/ts/server.go index fa54007bb455..ec2c434c5a2a 100644 --- a/pkg/ts/server.go +++ b/pkg/ts/server.go @@ -67,13 +67,13 @@ type ServerConfig struct { // The server attempts to constrain the total amount of memory it uses for // processing incoming queries. This is accomplished with a multi-pronged // strategy: -// + The server has a worker memory limit, which is a quota for the amount of -// memory that can be used across all currently executing queries. -// + The server also has a pre-set limit on the number of parallel workers that -// can be executing at one time. Each worker is given an even share of the -// server's total memory limit, which it should not exceed. -// + Each worker breaks its task into chunks which it will process sequentially; -// the size of each chunk is calculated to avoid exceeding the memory limit. +// - The server has a worker memory limit, which is a quota for the amount of +// memory that can be used across all currently executing queries. +// - The server also has a pre-set limit on the number of parallel workers that +// can be executing at one time. Each worker is given an even share of the +// server's total memory limit, which it should not exceed. +// - Each worker breaks its task into chunks which it will process sequentially; +// the size of each chunk is calculated to avoid exceeding the memory limit. // // In addition to this strategy, the server uses a memory monitor to track the // amount of memory being used in reality by worker tasks. This is intended to diff --git a/pkg/ts/testmodel/data.go b/pkg/ts/testmodel/data.go index 0ec1271172b7..ba9eec7c1534 100644 --- a/pkg/ts/testmodel/data.go +++ b/pkg/ts/testmodel/data.go @@ -104,7 +104,9 @@ func (data DataSeries) fillForResolution(resolution int64, fillFunc fillFunc) Da // rateOfChange returns the rate of change (over the supplied period) for each // point in the supplied series, which is defined as: -// (value - valuePrev) / ((time - timePrev) / period) +// +// (value - valuePrev) / ((time - timePrev) / period) +// // The returned series will be shorter than the original series by one, since // the rate of change for the first datapoint cannot be computed in this // fashion. 
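As a quick illustration of the rate-of-change formula documented in the hunk above, (value - valuePrev) / ((time - timePrev) / period), here is a small self-contained sketch (not part of this diff); the dataPoint type and rateOfChange helper are hypothetical, not the testmodel package's own.

package main

import "fmt"

// dataPoint is a hypothetical (timestamp, value) sample; the testmodel
// package has its own representation.
type dataPoint struct {
	timestampNanos int64
	value          float64
}

// rateOfChange returns, for each point after the first, the change in value
// normalized to the supplied period, so the result is one element shorter
// than the input (matching the comment above).
func rateOfChange(series []dataPoint, period int64) []dataPoint {
	if len(series) < 2 {
		return nil
	}
	out := make([]dataPoint, 0, len(series)-1)
	for i := 1; i < len(series); i++ {
		prev, cur := series[i-1], series[i]
		rate := (cur.value - prev.value) /
			(float64(cur.timestampNanos-prev.timestampNanos) / float64(period))
		out = append(out, dataPoint{timestampNanos: cur.timestampNanos, value: rate})
	}
	return out
}

func main() {
	// Two samples 10s apart with a value delta of 5: the rate over a 1s period
	// is 0.5.
	series := []dataPoint{{0, 100}, {10e9, 105}}
	fmt.Println(rateOfChange(series, 1e9)) // [{10000000000 0.5}]
}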
diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index b00f3b5d00ae..52f9fb198a9c 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -60,7 +60,6 @@ import ( // [2]: pkg/clusterversion/cockroach_versions.go // [3]: truncatedStateMigration // [4]: pkg/kv/kvserver/batch_eval/cmd_migrate.go -// type Upgrade interface { ClusterVersion() clusterversion.ClusterVersion Name() string diff --git a/pkg/upgrade/upgradecluster/tenant_cluster.go b/pkg/upgrade/upgradecluster/tenant_cluster.go index c99af14e262f..7c40b2dbeab1 100644 --- a/pkg/upgrade/upgradecluster/tenant_cluster.go +++ b/pkg/upgrade/upgradecluster/tenant_cluster.go @@ -24,38 +24,40 @@ import ( // upgrades in the secondary tenants. In the current iteration, it assumes // that there is a single pod per tenant. // -// Tenants and cluster upgrades +// # Tenants and cluster upgrades // // Tenants have their own system tables and settings, which get bootstrapped in // CreateTenant along with an initial cluster version. The interplay between // tenant cluster version and KV cluster version is complicated. First, recall // that there are multiple "versions": // -// - the cluster version is the version at which the cluster operates. The -// cluster version of the system tenant is *the* cluster version of the KV -// layer. This is a single value (stored in the KV store) but while it -// changes, some nodes will be using the old value, and some the new one. KV -// does a lot of work to be able to tell when *all* nodes have adopted the -// new value. Non-system tenants also have a similar cluster value, which is -// stored in a KV pair under the tenant's jurisdiction. Explaining how this -// relates to that of the system tenant is the main aim of this -// documentation. -// - the binary version is the largest cluster version a binary (i.e. cockroach -// executable) can in principle support. For most of the time, the binary -// version equals the cluster version, but during upgrades, nodes will -// temporarily run with a binary version larger than the cluster version. -// - the binary minimum supported version is the smallest cluster version a -// binary can in principle support. It is typically set to the previous major -// release, for binary with a 20.2 binary version has a binary minimum -// supported version of 20.1, meaning that it can participate in a cluster -// running at cluster version 20.1 (which is necessary when a 20.1 cluster is -// upgraded to 20.2). -// -// BinaryMinSupportedVersion BinaryVersion -// | | -// v...........................................v -// (possible range of active -// cluster versions) +// - the cluster version is the version at which the cluster operates. The +// cluster version of the system tenant is *the* cluster version of the KV +// layer. This is a single value (stored in the KV store) but while it +// changes, some nodes will be using the old value, and some the new one. KV +// does a lot of work to be able to tell when *all* nodes have adopted the +// new value. Non-system tenants also have a similar cluster value, which is +// stored in a KV pair under the tenant's jurisdiction. Explaining how this +// relates to that of the system tenant is the main aim of this +// documentation. +// +// - the binary version is the largest cluster version a binary (i.e. cockroach +// executable) can in principle support. 
For most of the time, the binary +// version equals the cluster version, but during upgrades, nodes will +// temporarily run with a binary version larger than the cluster version. +// +// - the binary minimum supported version is the smallest cluster version a +// binary can in principle support. It is typically set to the previous major +// release, for binary with a 20.2 binary version has a binary minimum +// supported version of 20.1, meaning that it can participate in a cluster +// running at cluster version 20.1 (which is necessary when a 20.1 cluster is +// upgraded to 20.2). +// +// BinaryMinSupportedVersion BinaryVersion +// | | +// v...........................................v +// (possible range of active +// cluster versions) // // Versions are used in many checks to prevent issues due to operator error. The // main one of interest here is that RPC connections between nodes (including @@ -117,7 +119,6 @@ import ( // 4. KV finalizes 21.1. // 5. All tenants have to be at 21.1 cluster version before KV gets upgraded // again in the next release. -// type TenantCluster struct { db *kv.DB } diff --git a/pkg/upgrade/upgrades/system_users_role_id_migration.go b/pkg/upgrade/upgrades/system_users_role_id_migration.go index ed65caabc863..aff11405a04e 100644 --- a/pkg/upgrade/upgrades/system_users_role_id_migration.go +++ b/pkg/upgrade/upgrades/system_users_role_id_migration.go @@ -28,11 +28,12 @@ import ( ) // The migration is broken down into four steps. -// 1. Adding the "user_id" column as NULL. -// All newly created users will have an ID after step 1, this gives us a -// cut off point on which users we need to backfill. -// 2. Manually backfill the id column. -// 3. Set the column to not null. +// 1. Adding the "user_id" column as NULL. +// All newly created users will have an ID after step 1, this gives us a +// cut off point on which users we need to backfill. +// 2. Manually backfill the id column. +// 3. Set the column to not null. +// // We need to do this because we cannot add a column with a nextval call as a // default expression. // It results in: unimplemented: cannot evaluate scalar expressions diff --git a/pkg/util/admission/admission.go b/pkg/util/admission/admission.go index ba1c78318806..831411eac33e 100644 --- a/pkg/util/admission/admission.go +++ b/pkg/util/admission/admission.go @@ -387,7 +387,8 @@ type WorkKind int8 // units that are expected to be primarily CPU bound (with disk IO for KVWork, // but cache hit rates are typically high), and expected to be where most of // the CPU consumption happens. These are prioritized in the order -// KVWork > SQLKVResponseWork > SQLSQLResponseWork +// +// KVWork > SQLKVResponseWork > SQLSQLResponseWork // // The high prioritization of KVWork reduces the likelihood that non-SQL KV // work will be starved. SQLKVResponseWork is prioritized over @@ -399,7 +400,9 @@ type WorkKind int8 // // Furthermore, SQLStatementLeafStartWork and SQLStatementRootStartWork are // prioritized lowest with -// SQLStatementLeafStartWork > SQLStatementRootStartWork +// +// SQLStatementLeafStartWork > SQLStatementRootStartWork +// // This follows the same idea of prioritizing lower layers above higher layers // since it releases memory caught up in lower layers, and exerts natural // backpressure on the higher layer. @@ -428,39 +431,39 @@ type WorkKind int8 // The aforementioned prioritization also enables us to get instantaneous // feedback on CPU resource overload. 
This instantaneous feedback for a grant // chain (mentioned earlier) happens in two ways: -// - the chain requires the grantee's goroutine to run. -// - the cpuOverloadIndicator (see later), specifically the implementation -// provided by kvSlotAdjuster, provides instantaneous feedback (which is -// viable only because KVWork is the highest priority). +// - the chain requires the grantee's goroutine to run. +// - the cpuOverloadIndicator (see later), specifically the implementation +// provided by kvSlotAdjuster, provides instantaneous feedback (which is +// viable only because KVWork is the highest priority). // // Weaknesses of this strict prioritization across WorkKinds: -// - Priority inversion: Lower importance KVWork, not derived from SQL, like -// GC of MVCC versions, will happen before user-facing SQLKVResponseWork. -// This is because the backpressure, described in the example above, does -// not apply to work generated from within the KV layer. -// TODO(sumeer): introduce a KVLowPriWork and put it last in this ordering, -// to get over this limitation. -// - Insufficient competition leading to poor isolation: Putting -// SQLStatementLeafStartWork, SQLStatementRootStartWork in this list, within -// the same GrantCoordinator, does provide node overload protection, but not -// necessarily performance isolation when we have WorkKinds of different -// importance. Consider the same OLAP example above: if the KVWork slots -// being full due to the OLAP query prevents SQLStatementRootStartWork for -// the OLTP queries, the competition is starved out before it has an -// opportunity to submit any KVWork. Given that control over admitting -// SQLStatement{Leaf,Root}StartWork is not primarily about CPU control (the -// lower-level work items are where cpu is consumed), we could decouple -// these two into a separate GrantCoordinator and only gate them with (high) -// fixed slot counts that allow for enough competition, plus a memory -// overload indicator. -// TODO(sumeer): experiment with this approach. -// - Continuing the previous bullet, low priority long-lived -// {SQLStatementLeafStartWork, SQLStatementRootStartWork} could use up all -// the slots, if there was no high priority work for some period of time, -// and therefore starve admission of the high priority work when it does -// appear. The typical solution to this is to put a max on the number of -// slots low priority can use. This would be viable if we did not allow -// arbitrary int8 values to be set for Priority. +// - Priority inversion: Lower importance KVWork, not derived from SQL, like +// GC of MVCC versions, will happen before user-facing SQLKVResponseWork. +// This is because the backpressure, described in the example above, does +// not apply to work generated from within the KV layer. +// TODO(sumeer): introduce a KVLowPriWork and put it last in this ordering, +// to get over this limitation. +// - Insufficient competition leading to poor isolation: Putting +// SQLStatementLeafStartWork, SQLStatementRootStartWork in this list, within +// the same GrantCoordinator, does provide node overload protection, but not +// necessarily performance isolation when we have WorkKinds of different +// importance. Consider the same OLAP example above: if the KVWork slots +// being full due to the OLAP query prevents SQLStatementRootStartWork for +// the OLTP queries, the competition is starved out before it has an +// opportunity to submit any KVWork. 
Given that control over admitting +// SQLStatement{Leaf,Root}StartWork is not primarily about CPU control (the +// lower-level work items are where cpu is consumed), we could decouple +// these two into a separate GrantCoordinator and only gate them with (high) +// fixed slot counts that allow for enough competition, plus a memory +// overload indicator. +// TODO(sumeer): experiment with this approach. +// - Continuing the previous bullet, low priority long-lived +// {SQLStatementLeafStartWork, SQLStatementRootStartWork} could use up all +// the slots, if there was no high priority work for some period of time, +// and therefore starve admission of the high priority work when it does +// appear. The typical solution to this is to put a max on the number of +// slots low priority can use. This would be viable if we did not allow +// arbitrary int8 values to be set for Priority. const ( // KVWork represents requests submitted to the KV layer, from the same node // or a different node. They may originate from the SQL layer or the KV diff --git a/pkg/util/admission/io_load_listener.go b/pkg/util/admission/io_load_listener.go index 6d4539447dfc..d8bc33cdecdf 100644 --- a/pkg/util/admission/io_load_listener.go +++ b/pkg/util/admission/io_load_listener.go @@ -83,18 +83,18 @@ var L0SubLevelCountOverloadThreshold = settings.RegisterIntSetting( l0SubLevelCountOverloadThreshold, settings.PositiveInt) // Experimental observations: -// - Sub-level count of ~40 caused a node heartbeat latency p90, p99 of 2.5s, -// 4s. With a setting that limits sub-level count to 10, before the system -// is considered overloaded, and adjustmentInterval = 60, we see the actual -// sub-level count ranging from 5-30, with p90, p99 node heartbeat latency -// showing a similar wide range, with 1s, 2s being the middle of the range -// respectively. -// - With tpcc, we sometimes see a sub-level count > 10 with only 100 files in -// L0. We don't want to restrict tokens in this case since the store is able -// to recover on its own. One possibility would be to require both the -// thresholds to be exceeded before we consider the store overloaded. But -// then we run the risk of having 100+ sub-levels when we hit a file count -// of 1000. Instead we use a sub-level overload threshold of 20. +// - Sub-level count of ~40 caused a node heartbeat latency p90, p99 of 2.5s, +// 4s. With a setting that limits sub-level count to 10, before the system +// is considered overloaded, and adjustmentInterval = 60, we see the actual +// sub-level count ranging from 5-30, with p90, p99 node heartbeat latency +// showing a similar wide range, with 1s, 2s being the middle of the range +// respectively. +// - With tpcc, we sometimes see a sub-level count > 10 with only 100 files in +// L0. We don't want to restrict tokens in this case since the store is able +// to recover on its own. One possibility would be to require both the +// thresholds to be exceeded before we consider the store overloaded. But +// then we run the risk of having 100+ sub-levels when we hit a file count +// of 1000. Instead we use a sub-level overload threshold of 20. // // We've set these overload thresholds in a way that allows the system to // absorb short durations (say a few minutes) of heavy write load. @@ -107,16 +107,15 @@ const l0SubLevelCountOverloadThreshold = 20 // just means that the write has been applied to the WAL. Most of the work is // in flushing to sstables and the following compactions, which happens later. 
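As a rough illustration of how the observations above translate into an overload signal, the following sketch checks the two thresholds discussed (a sub-level count of 20 and an L0 file count of 1000) with OR semantics, which is the compromise the comment settles on instead of requiring both thresholds to trip. The struct and function names are invented; only the threshold values come from the text.

    package main

    import "fmt"

    // l0Stats is a hypothetical snapshot of L0 state.
    type l0Stats struct {
        numFiles     int
        numSubLevels int
    }

    const (
        l0FileCountOverloadThreshold     = 1000
        l0SubLevelCountOverloadThreshold = 20
    )

    // ioOverloaded reports whether either threshold is exceeded. Using OR with
    // a raised sub-level threshold avoids throttling the tpcc-like case (many
    // sub-levels, few files) while still catching runaway file counts.
    func ioOverloaded(s l0Stats) bool {
        return s.numFiles >= l0FileCountOverloadThreshold ||
            s.numSubLevels >= l0SubLevelCountOverloadThreshold
    }

    func main() {
        fmt.Println(ioOverloaded(l0Stats{numFiles: 100, numSubLevels: 25})) // true: sub-levels
        fmt.Println(ioOverloaded(l0Stats{numFiles: 500, numSubLevels: 5}))  // false
    }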
// -// // Token units are in bytes and used to protect a number of virtual or // physical resource bottlenecks: -// - Compactions out of L0: compactions out of L0 can fall behind and cause -// too many sub-levels or files in L0. -// - Flushes into L0: flushes of memtables to L0 can fall behind and cause -// write stalls due to too many memtables. -// - Disk bandwidth: there is typically an aggregate read+write provisioned -// bandwidth, and if it is fully utilized, IO operations can start queueing -// and encounter high latency. +// - Compactions out of L0: compactions out of L0 can fall behind and cause +// too many sub-levels or files in L0. +// - Flushes into L0: flushes of memtables to L0 can fall behind and cause +// write stalls due to too many memtables. +// - Disk bandwidth: there is typically an aggregate read+write provisioned +// bandwidth, and if it is fully utilized, IO operations can start queueing +// and encounter high latency. // // For simplicity, after ioLoadListener computes the tokens due to compaction // or flush bottlenecks, it computes the minimum and passes that value to @@ -139,19 +138,19 @@ const l0SubLevelCountOverloadThreshold = 20 // dimension. However, since modeling as a separate dimension everywhere would // lead to code complexity, we adopt the following compromise: // -// - Like the other token dimensions, ioLoadListener computes a different -// elastic token count (using diskBandwidthLimiter), and a different model -// for consumption (via -// storePerWorkTokenEstimator.atDoneDiskBWTokensLinearModel). +// - Like the other token dimensions, ioLoadListener computes a different +// elastic token count (using diskBandwidthLimiter), and a different model +// for consumption (via +// storePerWorkTokenEstimator.atDoneDiskBWTokensLinearModel). // -// - granterWithIOTokens, implemented by kvStoreTokenGranter, which enforces -// the token count, also treats this as a separate dimension. +// - granterWithIOTokens, implemented by kvStoreTokenGranter, which enforces +// the token count, also treats this as a separate dimension. // -// - WorkQueue works with a single dimension, so the tokens consumed at -// admission time are based on L0-bytes estimate. However, when -// StoreWorkQueue informs kvStoreTokenGranter of work completion (by calling -// storeWriteDone), the tokens are adjusted differently for the -// flush/compaction L0 tokens and for the "disk bandwidth" tokens. +// - WorkQueue works with a single dimension, so the tokens consumed at +// admission time are based on L0-bytes estimate. However, when +// StoreWorkQueue informs kvStoreTokenGranter of work completion (by calling +// storeWriteDone), the tokens are adjusted differently for the +// flush/compaction L0 tokens and for the "disk bandwidth" tokens. type ioLoadListener struct { storeID int32 settings *cluster.Settings @@ -220,11 +219,11 @@ const unlimitedTokens = math.MaxInt64 // compactions can take ~10s to complete. The totalNumByteTokens to give out over // the 15s interval are given out in a smoothed manner, at 250ms intervals. // This has similarities with the following kinds of token buckets: -// - Zero replenishment rate and a burst value that is changed every 15s. We -// explicitly don't want a huge burst every 15s. -// - A replenishment rate equal to totalNumByteTokens/60, with a burst capped at -// totalNumByteTokens/60. The only difference with the code here is that if -// totalNumByteTokens is small, the integer rounding effects are compensated for. 
+// - Zero replenishment rate and a burst value that is changed every 15s. We +// explicitly don't want a huge burst every 15s. +// - A replenishment rate equal to totalNumByteTokens/60, with a burst capped at +// totalNumByteTokens/60. The only difference with the code here is that if +// totalNumByteTokens is small, the integer rounding effects are compensated for. // // In an experiment with extreme overload using KV0 with block size 64KB, // and 4096 clients, we observed the following states of L0 at 1min @@ -232,13 +231,13 @@ const unlimitedTokens = math.MaxInt64 // admission control: // // __level_____count____size___score______in__ingest(sz_cnt)____move(sz_cnt)___write(sz_cnt)____read___r-amp___w-amp› -// 0 96 158 M 2.09 315 M 0 B 0 0 B 0 305 M 178 0 B 3 1.0› -// 0 1026 1.7 G 3.15 4.7 G 0 B 0 0 B 0 4.7 G 2.8 K 0 B 24 1.0› -// 0 1865 3.0 G 2.86 9.1 G 0 B 0 0 B 0 9.1 G 5.5 K 0 B 38 1.0› -// 0 3225 4.9 G 3.46 13 G 0 B 0 0 B 0 13 G 8.3 K 0 B 59 1.0› -// 0 4720 7.0 G 3.46 17 G 0 B 0 0 B 0 17 G 11 K 0 B 85 1.0› -// 0 6120 9.0 G 4.13 21 G 0 B 0 0 B 0 21 G 14 K 0 B 109 1.0› // +// 0 96 158 M 2.09 315 M 0 B 0 0 B 0 305 M 178 0 B 3 1.0› +// 0 1026 1.7 G 3.15 4.7 G 0 B 0 0 B 0 4.7 G 2.8 K 0 B 24 1.0› +// 0 1865 3.0 G 2.86 9.1 G 0 B 0 0 B 0 9.1 G 5.5 K 0 B 38 1.0› +// 0 3225 4.9 G 3.46 13 G 0 B 0 0 B 0 13 G 8.3 K 0 B 59 1.0› +// 0 4720 7.0 G 3.46 17 G 0 B 0 0 B 0 17 G 11 K 0 B 85 1.0› +// 0 6120 9.0 G 4.13 21 G 0 B 0 0 B 0 21 G 14 K 0 B 109 1.0› // // Note the fast growth in sub-level count. Production issues typically have // slower growth towards an unhealthy state (remember that similar stats in diff --git a/pkg/util/admission/tokens_linear_model.go b/pkg/util/admission/tokens_linear_model.go index 57b499172357..06aea50a0a46 100644 --- a/pkg/util/admission/tokens_linear_model.go +++ b/pkg/util/admission/tokens_linear_model.go @@ -31,7 +31,6 @@ func (m tokensLinearModel) applyLinearModel(b int64) int64 { // However, the simple approach here should be an improvement on the additive // approach we previously used. // -// // TODO(sumeer): improve the model based on realistic combinations of // workloads (e.g. foreground writes + index backfills). type tokensLinearModelFitter struct { @@ -73,21 +72,21 @@ func makeTokensLinearModelFitter( // probably poor, though an improvement on what we had previously. The approach // taken is: // -// - Fit the best model we can for the interval, -// multiplier*accountedBytes + workCount*constant = actualBytes, while -// minimizing the constant. We prefer the model to use the multiplier for -// most of what it needs to account for actualBytes. -// This exact model ignores inaccuracies due to integer arithmetic -- we -// don't care about rounding errors since an error of 2 bytes per request is -// inconsequential. +// - Fit the best model we can for the interval, +// multiplier*accountedBytes + workCount*constant = actualBytes, while +// minimizing the constant. We prefer the model to use the multiplier for +// most of what it needs to account for actualBytes. +// This exact model ignores inaccuracies due to integer arithmetic -- we +// don't care about rounding errors since an error of 2 bytes per request is +// inconsequential. // -// - The multiplier has to conform to the [min,max] configured for this model, -// and constant has to conform to a value >= 1. The constant is constrained -// to be >=1 on the intuition that we want a request to consume at least 1 -// token -- it isn't clear that this intuition is meaningful in any way. 
+// - The multiplier has to conform to the [min,max] configured for this model, +// and constant has to conform to a value >= 1. The constant is constrained +// to be >=1 on the intuition that we want a request to consume at least 1 +// token -- it isn't clear that this intuition is meaningful in any way. // -// - Exponentially smooth this exact model's multiplier and constant based on -// history. +// - Exponentially smooth this exact model's multiplier and constant based on +// history. func (f *tokensLinearModelFitter) updateModelUsingIntervalStats( accountedBytes int64, actualBytes int64, workCount int64, ) { diff --git a/pkg/util/admission/work_queue.go b/pkg/util/admission/work_queue.go index 60a5e5133be8..dc7ac1df8714 100644 --- a/pkg/util/admission/work_queue.go +++ b/pkg/util/admission/work_queue.go @@ -192,19 +192,20 @@ type WorkInfo struct { // used slots or tokens. // // Usage example: -// var grantCoord *GrantCoordinator -// -// kvQueue := grantCoord.GetWorkQueue(KVWork) -// // -// // Before starting some kv server work -// if enabled, err := kvQueue.Admit(ctx, WorkInfo{TenantID: tid, ...}); err != nil { -// return err -// } -// -// if enabled { -// kvQueue.AdmittedWorkDone(tid) -// } +// var grantCoord *GrantCoordinator +// +// kvQueue := grantCoord.GetWorkQueue(KVWork) +// +// +// // Before starting some kv server work +// if enabled, err := kvQueue.Admit(ctx, WorkInfo{TenantID: tid, ...}); err != nil { +// return err +// } +// +// if enabled { +// kvQueue.AdmittedWorkDone(tid) +// } type WorkQueue struct { ambientCtx context.Context workKind WorkKind @@ -1371,10 +1372,11 @@ func (wwh *waitingWorkHeap) Len() int { return len(*wwh) } // satisfied in the presence of elements that have different values of // arrivalTimeWorkOrdering. This is acceptable for heap maintenance. // Example: Three work items with the same epoch where t1 < t2 < t3 -// w3: (fifo, create: t3, epoch: e) -// w2: (lifo, create: t2, epoch: e) -// w1: (fifo, create: t1, epoch: e) -// w1 < w3, w3 < w2, w2 < w1, which is a cycle. +// +// w3: (fifo, create: t3, epoch: e) +// w2: (lifo, create: t2, epoch: e) +// w1: (fifo, create: t1, epoch: e) +// w1 < w3, w3 < w2, w2 < w1, which is a cycle. func (wwh *waitingWorkHeap) Less(i, j int) bool { if (*wwh)[i].priority == (*wwh)[j].priority { if (*wwh)[i].arrivalTimeWorkOrdering == lifoWorkOrdering || diff --git a/pkg/util/binfetcher/binfetcher.go b/pkg/util/binfetcher/binfetcher.go index 2eab790a15d3..df03bb687e06 100644 --- a/pkg/util/binfetcher/binfetcher.go +++ b/pkg/util/binfetcher/binfetcher.go @@ -146,9 +146,9 @@ var httpClient = httputil.NewClientWithTimeout(300 * time.Second) // // `version` can be: // -// - a SHA from the master branch, e.g. bd828feaa309578142fe7ad2d89ee1b70adbd52d -// - the string "LATEST" for the most recent SHA from the master branch. Note that -// caching is disabled in that case. +// - a SHA from the master branch, e.g. bd828feaa309578142fe7ad2d89ee1b70adbd52d +// - the string "LATEST" for the most recent SHA from the master branch. Note that +// caching is disabled in that case. // // Returns the path to the (executable) binary. 
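The interval-fitting procedure described for tokensLinearModelFitter above can be sketched roughly as follows. This is a simplified reconstruction under the stated constraints (prefer the multiplier, clamp it to [min, max], keep the per-work constant at >= 1, then exponentially smooth), not the actual updateModelUsingIntervalStats body; all identifiers are invented.

    package main

    import "fmt"

    // fitResult is a hypothetical container for one interval's fitted model.
    type fitResult struct {
        multiplier float64
        constant   int64
    }

    // fitInterval sketches the approach described above: attribute the minimum
    // constant of 1 token to each work item, explain the remainder through the
    // multiplier, clamp the multiplier to [minMult, maxMult], and push whatever
    // is left back into the constant.
    func fitInterval(accountedBytes, actualBytes, workCount int64, minMult, maxMult float64) fitResult {
        const minConstant = 1
        if workCount <= 0 || accountedBytes <= 0 {
            // Nothing to fit in this interval; fall back to a neutral model.
            return fitResult{multiplier: maxMult, constant: minConstant}
        }
        rest := actualBytes - workCount*minConstant
        mult := float64(rest) / float64(accountedBytes)
        if mult < minMult {
            mult = minMult
        } else if mult > maxMult {
            mult = maxMult
        }
        // Whatever the clamped multiplier cannot account for goes back into the
        // per-work constant, floored at 1 token per work item.
        constant := (actualBytes - int64(mult*float64(accountedBytes))) / workCount
        if constant < minConstant {
            constant = minConstant
        }
        return fitResult{multiplier: mult, constant: constant}
    }

    // smooth applies the exponential smoothing step mentioned in the last bullet.
    func smooth(prev, cur, alpha float64) float64 { return alpha*cur + (1-alpha)*prev }

    func main() {
        m := fitInterval(1000, 2600, 100, 0.5, 1.5)
        fmt.Printf("multiplier=%.2f constant=%d\n", m.multiplier, m.constant) // multiplier=1.50 constant=11
        fmt.Println(smooth(1.0, m.multiplier, 0.5))                           // 1.25
    }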
func Download(ctx context.Context, opts Options) (string, error) { diff --git a/pkg/util/cgroups/cgroups.go b/pkg/util/cgroups/cgroups.go index 4d1eb47b2a87..cceb9d01a304 100644 --- a/pkg/util/cgroups/cgroups.go +++ b/pkg/util/cgroups/cgroups.go @@ -79,13 +79,15 @@ func GetMemoryInactiveFileUsage() (usage int64, warnings string, err error) { // getCgroupMemInactiveFileUsage reads the memory cgroup's current inactive // file-backed memory usage (in bytes) on the inactive LRU list for both cgroups // v1 and v2. The associated files and keys are: -// cgroupv1: cgroupV1MemInactiveFileUsageStatKey in cgroupV1MemStatFilename -// cgroupv2: cgroupV2MemInactiveFileUsageStatKey in cgroupV2MemStatFilename +// +// cgroupv1: cgroupV1MemInactiveFileUsageStatKey in cgroupV1MemStatFilename +// cgroupv2: cgroupV2MemInactiveFileUsageStatKey in cgroupV2MemStatFilename // // The `root` parameter is set to "/" in production code and exists only for // testing. The cgroup inactive file usage detection path is implemented as: +// // /proc/self/cgroup file -// |-> /proc/self/mountinfo mounts +// |-> /proc/self/mountinfo mounts // |-> cgroup version // |-> version specific usage check func getCgroupMemInactiveFileUsage(root string) (usage int64, warnings string, err error) { @@ -118,13 +120,15 @@ func getCgroupMemInactiveFileUsage(root string) (usage int64, warnings string, e // getCgroupMemUsage reads the memory cgroup's current memory usage (in bytes) // for both cgroups v1 and v2. The associated files are: -// cgroupv1: cgroupV1MemUsageFilename -// cgroupv2: cgroupV2MemUsageFilename +// +// cgroupv1: cgroupV1MemUsageFilename +// cgroupv2: cgroupV2MemUsageFilename // // The `root` parameter is set to "/" in production code and exists only for // testing. The cgroup memory usage detection path is implemented here as: +// // /proc/self/cgroup file -// |-> /proc/self/mountinfo mounts +// |-> /proc/self/mountinfo mounts // |-> cgroup version // |-> version specific usage check func getCgroupMemUsage(root string) (usage int64, warnings string, err error) { @@ -157,13 +161,15 @@ func getCgroupMemUsage(root string) (usage int64, warnings string, err error) { // getCgroupMemLimit reads the memory cgroup's memory limit (in bytes) for both // cgroups v1 and v2. The associated files (and for cgroupv2, keys) are: -// cgroupv1: cgroupV2MemLimitFilename -// cgroupv2: cgroupV1MemLimitStatKey in cgroupV2MemStatFilename +// +// cgroupv1: cgroupV2MemLimitFilename +// cgroupv2: cgroupV1MemLimitStatKey in cgroupV2MemStatFilename // // The `root` parameter is set to "/" in production code and exists only for // testing. 
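The first hop of the cgroup detection path described above (reading /proc/self/cgroup before consulting mountinfo) can be sketched as below. This is a deliberate simplification that skips the /proc/self/mountinfo step entirely; the root parameter mirrors the testing-only root described in the comments, and the "only 0::-prefixed lines means cgroup v2" heuristic is an assumption of the sketch, not the package's logic.

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    // detectCgroupVersion guesses the cgroup version of the current process by
    // inspecting /proc/self/cgroup. On a pure v2 (unified) hierarchy the file
    // contains only "0::<path>" entries; named v1 controllers indicate v1.
    func detectCgroupVersion(root string) (int, error) {
        f, err := os.Open(filepath.Join(root, "proc/self/cgroup"))
        if err != nil {
            return 0, err
        }
        defer f.Close()
        sc := bufio.NewScanner(f)
        v2Only := true
        for sc.Scan() {
            line := sc.Text()
            if line == "" {
                continue
            }
            if !strings.HasPrefix(line, "0::") {
                v2Only = false
            }
        }
        if err := sc.Err(); err != nil {
            return 0, err
        }
        if v2Only {
            return 2, nil
        }
        return 1, nil
    }

    func main() {
        v, err := detectCgroupVersion("/")
        if err != nil {
            fmt.Println("no cgroup information available:", err)
            return
        }
        fmt.Println("cgroup version:", v)
    }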
The cgroup memory limit detection path is implemented here as: +// // /proc/self/cgroup file -// |-> /proc/self/mountinfo mounts +// |-> /proc/self/mountinfo mounts // |-> cgroup version // |-> version specific limit check func getCgroupMemLimit(root string) (limit int64, warnings string, err error) { diff --git a/pkg/util/cloudinfo/cloudinfo_test.go b/pkg/util/cloudinfo/cloudinfo_test.go index 39c48559b74b..2f3e0f769ef8 100644 --- a/pkg/util/cloudinfo/cloudinfo_test.go +++ b/pkg/util/cloudinfo/cloudinfo_test.go @@ -14,7 +14,7 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "net/http" "testing" @@ -44,7 +44,7 @@ func NewInstanceMetadataTestClient() *httputil.Client { case awsMetadataEndpoint: // Response taken from the AWS instance identity // document internal endpoint on May 2 2019 - res.Body = ioutil.NopCloser(bytes.NewBufferString(`{ + res.Body = io.NopCloser(bytes.NewBufferString(`{ "devpayProductCodes" : null, "marketplaceProductCodes" : null, "version" : "2017-09-30", @@ -64,19 +64,19 @@ func NewInstanceMetadataTestClient() *httputil.Client { case (gcpMetadataEndpoint + "machine-type"): // response taken from the GCP internal metadata // endpoint on May 2 2019 - res.Body = ioutil.NopCloser(bytes.NewBufferString( + res.Body = io.NopCloser(bytes.NewBufferString( `projects/93358566124/machineTypes/g1-small`, )) case (gcpMetadataEndpoint + "zone"): // response taken from the GCP internal metadata // endpoint on June 3 2019 - res.Body = ioutil.NopCloser(bytes.NewBufferString( + res.Body = io.NopCloser(bytes.NewBufferString( `projects/93358566124/zones/us-east4-c`, )) case azureMetadataEndpoint: // response taken from the Azure internal metadata // endpoint on May 2 2019 - res.Body = ioutil.NopCloser(bytes.NewBufferString( + res.Body = io.NopCloser(bytes.NewBufferString( `{ "compute":{ "azEnvironment":"AzurePublicCloud", @@ -139,7 +139,7 @@ func NewInstanceMetadataTestClient() *httputil.Client { }`, )) default: - res.Body = ioutil.NopCloser(bytes.NewBufferString(``)) + res.Body = io.NopCloser(bytes.NewBufferString(``)) } return res diff --git a/pkg/util/ctxgroup/ctxgroup.go b/pkg/util/ctxgroup/ctxgroup.go index 39530c6bbf95..f83d6aeddcbb 100644 --- a/pkg/util/ctxgroup/ctxgroup.go +++ b/pkg/util/ctxgroup/ctxgroup.go @@ -44,7 +44,7 @@ more difficult. Example usage: } api.Call(ctx, "done") -Problems with errgroup +# Problems with errgroup The bugs this package attempts to prevent are: misuse of shadowed ctx variables after errgroup closure and confusion in the face of @@ -116,7 +116,6 @@ Now the final api.Call is correct. But the other api.Call is incorrect and the ctx.Done receive is incorrect because they are using the wrong context and thus won't correctly exit early if the errgroup needs to exit early. - */ package ctxgroup diff --git a/pkg/util/encoding/decimal.go b/pkg/util/encoding/decimal.go index 8d89acaa9401..8ce518452c0c 100644 --- a/pkg/util/encoding/decimal.go +++ b/pkg/util/encoding/decimal.go @@ -495,9 +495,9 @@ func decodeLargeNumber( // EncodeNonsortingDecimal returns the resulting byte slice with the // encoded decimal appended to b. 
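To make the recommended ctxgroup pattern concrete, here is a small usage sketch. It assumes ctxgroup.WithContext, Group.GoCtx, and Group.Wait behave as the package documentation above implies, and apiCall is a hypothetical stand-in for the api.Call used in those docs.

    package example

    import (
        "context"

        "github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
    )

    // apiCall is a hypothetical stand-in for the api.Call used in the package
    // documentation above.
    var apiCall = func(ctx context.Context, s string) error { return nil }

    // runWork shows the pattern the documentation recommends: each goroutine
    // uses only the context handed to it by GoCtx, which is canceled when the
    // group errors out, so no goroutine keeps using a context it should treat
    // as done.
    func runWork(ctx context.Context) error {
        g := ctxgroup.WithContext(ctx)
        g.GoCtx(func(ctx context.Context) error {
            return apiCall(ctx, "work")
        })
        if err := g.Wait(); err != nil {
            return err
        }
        // Wait has returned, so the group's context is no longer in play; the
        // caller's ctx is the right one to use from here on.
        return apiCall(ctx, "done")
    }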
The encoding is limited compared to // standard encodings in this package in that -// - It will not sort lexicographically -// - It does not encode its length or terminate itself, so decoding -// functions must be provided the exact encoded bytes +// - It will not sort lexicographically +// - It does not encode its length or terminate itself, so decoding +// functions must be provided the exact encoded bytes // // The encoding assumes that any number can be written as ±0.xyz... * 10^exp, // where xyz is a digit string, x != 0, and the last decimal in xyz is also @@ -511,21 +511,22 @@ func decodeLargeNumber( // the digit string is added as a big-endian byte slice. // // All together, the encoding looks like: -// . +// +// . // // The markers are shared with the sorting decimal encoding as follows: -// decimalNaN -> decimalNaN -// decimalNegativeInfinity -> decimalNegativeInfinity -// decimalNegLarge -> decimalNegValPosExp -// decimalNegMedium -> decimalNegValZeroExp -// decimalNegSmall -> decimalNegValNegExp -// decimalZero -> decimalZero -// decimalPosSmall -> decimalPosValNegExp -// decimalPosMedium -> decimalPosValZeroExp -// decimalPosLarge -> decimalPosValPosExp -// decimalInfinity -> decimalInfinity -// decimalNaNDesc -> decimalNaNDesc // +// decimalNaN -> decimalNaN +// decimalNegativeInfinity -> decimalNegativeInfinity +// decimalNegLarge -> decimalNegValPosExp +// decimalNegMedium -> decimalNegValZeroExp +// decimalNegSmall -> decimalNegValNegExp +// decimalZero -> decimalZero +// decimalPosSmall -> decimalPosValNegExp +// decimalPosMedium -> decimalPosValZeroExp +// decimalPosLarge -> decimalPosValPosExp +// decimalInfinity -> decimalInfinity +// decimalNaNDesc -> decimalNaNDesc func EncodeNonsortingDecimal(b []byte, d *apd.Decimal) []byte { neg := d.Negative switch d.Form { diff --git a/pkg/util/encoding/encoding.go b/pkg/util/encoding/encoding.go index f3db0756247e..39a4583f1fd2 100644 --- a/pkg/util/encoding/encoding.go +++ b/pkg/util/encoding/encoding.go @@ -1463,12 +1463,13 @@ func DecodeDurationDescending(b []byte) ([]byte, duration.Duration, error) { // backing word array as a byte array, using byte array encoding and escaped // special bytes (via `encodeBytesAscendingWithoutTerminatorOrPrefix`). // There are two arguments against this alternative: -// - the bytes must be encoded big endian, but the most common architectures -// running CockroachDB are little-endian, so the bytes would need -// to be reordered prior to encoding. -// - when decoding or skipping over a value, the decoding/sizing loop -// would need to look at every byte of the encoding to find the -// terminator. +// - the bytes must be encoded big endian, but the most common architectures +// running CockroachDB are little-endian, so the bytes would need +// to be reordered prior to encoding. +// - when decoding or skipping over a value, the decoding/sizing loop +// would need to look at every byte of the encoding to find the +// terminator. +// // In contrast, the chosen encoding using varints is endianness-agnostic // and enables fast decoding/skipping thanks ot the tag bytes. func EncodeBitArrayAscending(b []byte, d bitarray.BitArray) []byte { @@ -1591,6 +1592,7 @@ func DecodeBitArrayDescending(b []byte) ([]byte, bitarray.BitArray, error) { // Type represents the type of a value encoded by // Encode{Null,NotNull,Varint,Uvarint,Float,Bytes}. 
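The "±0.xyz... * 10^exp" normalization that the nonsorting decimal encoding above relies on can be illustrated with a small, self-contained sketch. It is string-based and purely illustrative; the real encoder works on apd.Decimal coefficients and exponents.

    package main

    import (
        "fmt"
        "strings"
    )

    // normalize rewrites a positive decimal given as coefficient digits and a
    // base-10 exponent (value = digits * 10^exp10) into the 0.xyz... * 10^exp
    // form assumed above: x != 0 and the trailing digit of xyz is non-zero.
    func normalize(digits string, exp10 int) (xyz string, exp int) {
        digits = strings.TrimLeft(digits, "0")
        trimmed := strings.TrimRight(digits, "0")
        // Dropping trailing zeros does not change the value, it only shifts
        // the exponent up by the number of digits removed.
        exp10 += len(digits) - len(trimmed)
        // value = trimmed * 10^exp10 = 0.trimmed * 10^(exp10+len(trimmed)).
        return trimmed, exp10 + len(trimmed)
    }

    func main() {
        // 12345 * 10^-2 == 123.45 == 0.12345 * 10^3
        fmt.Println(normalize("12345", -2)) // 12345 3
        // 120000 * 10^0 == 0.12 * 10^6
        fmt.Println(normalize("120000", 0)) // 12 6
    }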
+// //go:generate stringer -type=Type type Type encodingtype.T @@ -2140,8 +2142,8 @@ func prettyPrintFirstValue(dir Direction, b []byte) ([]byte, string, error) { // // Formally: // -// PrefixEnd(UndoPrefixEnd(p)) = p for all non-minimal prefixes p -// UndoPrefixEnd(PrefixEnd(p)) = p for all non-maximal prefixes p +// PrefixEnd(UndoPrefixEnd(p)) = p for all non-minimal prefixes p +// UndoPrefixEnd(PrefixEnd(p)) = p for all non-maximal prefixes p // // A minimal prefix is any prefix that consists only of one or more 0x00 bytes; // analogously, a maximal prefix is any prefix that consists only of one or more @@ -2189,10 +2191,10 @@ const MaxNonsortingUvarintLen = 10 // EncodeNonsortingUvarint encodes a uint64, appends it to the supplied buffer, // and returns the final buffer. The encoding used is similar to // encoding/binary, but with the most significant bits first: -// - Unsigned integers are serialized 7 bits at a time, starting with the -// most significant bits. -// - The most significant bit (msb) in each output byte indicates if there -// is a continuation byte (msb = 1). +// - Unsigned integers are serialized 7 bits at a time, starting with the +// most significant bits. +// - The most significant bit (msb) in each output byte indicates if there +// is a continuation byte (msb = 1). func EncodeNonsortingUvarint(appendTo []byte, x uint64) []byte { switch { case x < (1 << 7): @@ -2531,12 +2533,16 @@ func EncodeJSONValue(appendTo []byte, colID uint32, data []byte) []byte { // returned colID should be discarded.) // // Concretely: -// b := ... -// typeOffset, _, colID, typ, err := DecodeValueTag(b) -// _, _, _, typ, err := DecodeValueTag(b[typeOffset:]) +// +// b := ... +// typeOffset, _, colID, typ, err := DecodeValueTag(b) +// _, _, _, typ, err := DecodeValueTag(b[typeOffset:]) +// // will return the same typ and err and -// DecodeFooValue(b) -// DecodeFooValue(b[typeOffset:]) +// +// DecodeFooValue(b) +// DecodeFooValue(b[typeOffset:]) +// // will return the same thing. PeekValueLength works as expected with either of // `b` or `b[typeOffset:]`. func DecodeValueTag(b []byte) (typeOffset int, dataOffset int, colID uint32, typ Type, err error) { diff --git a/pkg/util/errorutil/tenant_deprecated_wrapper.go b/pkg/util/errorutil/tenant_deprecated_wrapper.go index 5d98755aca74..c158b67d3abb 100644 --- a/pkg/util/errorutil/tenant_deprecated_wrapper.go +++ b/pkg/util/errorutil/tenant_deprecated_wrapper.go @@ -19,11 +19,11 @@ package errorutil // and which will be inaccessible from a SQL tenant server. Their uses in SQL // fall into two categories: // -// - functionality essential for multi-tenancy, i.e. a use which will -// have to be removed before we can start SQL tenant servers. -// - non-essential functionality, which will be disabled when run in -// a SQL tenant server. It may or may not be a long-term goal to remove -// this usage; this is determined on a case-by-case basis. +// - functionality essential for multi-tenancy, i.e. a use which will +// have to be removed before we can start SQL tenant servers. +// - non-essential functionality, which will be disabled when run in +// a SQL tenant server. It may or may not be a long-term goal to remove +// this usage; this is determined on a case-by-case basis. // // As work towards multi-tenancy is taking place, semi-dedicated SQL tenant // servers are supported. 
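A toy version of the MSB-first varint scheme described for EncodeNonsortingUvarint above (7 bits per byte, most significant group first, high bit as the continuation marker); this is an illustration of the scheme, not the package's encoder.

    package main

    import "fmt"

    // putUvarintMSB appends x to dst using 7-bit groups, most significant
    // group first, with the high bit set on every byte except the last.
    func putUvarintMSB(dst []byte, x uint64) []byte {
        // Count how many 7-bit groups are needed (at least one).
        groups := 1
        for v := x >> 7; v > 0; v >>= 7 {
            groups++
        }
        for i := groups - 1; i >= 0; i-- {
            b := byte(x>>(uint(i)*7)) & 0x7f
            if i > 0 {
                b |= 0x80 // continuation: more bytes follow
            }
            dst = append(dst, b)
        }
        return dst
    }

    func main() {
        fmt.Printf("% x\n", putUvarintMSB(nil, 1))     // 01
        fmt.Printf("% x\n", putUvarintMSB(nil, 300))   // 82 2c
        fmt.Printf("% x\n", putUvarintMSB(nil, 1<<14)) // 81 80 00
    }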
These are essentially SQL tenant servers that get diff --git a/pkg/util/fast_int_map.go b/pkg/util/fast_int_map.go index 1fa447537a74..924e305b9046 100644 --- a/pkg/util/fast_int_map.go +++ b/pkg/util/fast_int_map.go @@ -200,7 +200,9 @@ func (m FastIntMap) ForEach(fn func(key, val int)) { // ContentsIntoBuffer writes the contents of the map into the provided buffer in // the following format: -// key1:val1 key2:val2 ... +// +// key1:val1 key2:val2 ... +// // The keys are in ascending order. func (m FastIntMap) ContentsIntoBuffer(buf *bytes.Buffer) { first := true @@ -231,7 +233,9 @@ func (m FastIntMap) ContentsIntoBuffer(buf *bytes.Buffer) { } // String prints out the contents of the map in the following format: -// map[key1:val1 key2:val2 ...] +// +// map[key1:val1 key2:val2 ...] +// // The keys are in ascending order. func (m FastIntMap) String() string { var buf bytes.Buffer diff --git a/pkg/util/fsm/doc.go b/pkg/util/fsm/doc.go index bec2eb0edc96..2a3d839fc6e8 100644 --- a/pkg/util/fsm/doc.go +++ b/pkg/util/fsm/doc.go @@ -27,11 +27,11 @@ graph when it is created to specify its State graph. Since the Transition graph is itself state-less, multiple Machines can be powered by the same graph simultaneously. The Machine has an Apply(Event) method, which applies the provided event to its current state. This does two things: -1. It may move the current State to a new State, according to the Transitions - graph. -2. It may apply an Action function on the Machine's ExtendedState, which is - extra state in a Machine that does not contribute to state transition - decisions, but that can be affected by a state transition. + 1. It may move the current State to a new State, according to the Transitions + graph. + 2. It may apply an Action function on the Machine's ExtendedState, which is + extra state in a Machine that does not contribute to state transition + decisions, but that can be affected by a state transition. See example_test.go for a full working example of a state machine with an associated set of states and events. @@ -41,19 +41,19 @@ declaring this literal, be careful to not declare two equal keys: they'll result in the second overwriting the first with no warning because of how Go deals with map literals. Note that keys that are not technically equal, but where one is a superset of the other, will work as intended. E.g. the following is permitted: - Compile(Pattern{ - stateOpen{retryIntent: Any} { - eventTxnFinish{}: {...} - } - stateOpen{retryIntent: True} { - eventRetriableErr{}: {...} - } + + Compile(Pattern{ + stateOpen{retryIntent: Any} { + eventTxnFinish{}: {...} + } + stateOpen{retryIntent: True} { + eventRetriableErr{}: {...} + } Members of this package are accessed frequently when implementing a state machine. For that reason, it is encouraged to dot-import this package in the file with the transitions Pattern. The respective file should be kept small and named _fsm.go; our linter doesn't complain about dot-imports in such files. - */ package fsm diff --git a/pkg/util/fsm/match.go b/pkg/util/fsm/match.go index 33c21cbbbb8d..307c75daaffe 100644 --- a/pkg/util/fsm/match.go +++ b/pkg/util/fsm/match.go @@ -57,32 +57,31 @@ type Pattern map[State]map[Event]Transition // expandPattern expands the States and Events in a Pattern to produce a new // Pattern with no wildcards or variable bindings. 
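For reference, the ascending-key format documented for FastIntMap.ContentsIntoBuffer and String above looks like this when reproduced over an ordinary Go map; this toy is independent of FastIntMap's packed representation.

    package main

    import (
        "bytes"
        "fmt"
        "sort"
    )

    // formatAscending renders a map in the "map[key1:val1 key2:val2 ...]" form
    // described above, with keys in ascending order.
    func formatAscending(m map[int]int) string {
        keys := make([]int, 0, len(m))
        for k := range m {
            keys = append(keys, k)
        }
        sort.Ints(keys)
        var buf bytes.Buffer
        buf.WriteString("map[")
        for i, k := range keys {
            if i > 0 {
                buf.WriteByte(' ')
            }
            fmt.Fprintf(&buf, "%d:%d", k, m[k])
        }
        buf.WriteByte(']')
        return buf.String()
    }

    func main() {
        fmt.Println(formatAscending(map[int]int{3: 30, 1: 10, 2: 20})) // map[1:10 2:20 3:30]
    }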
For example: // -// Pattern{ -// state3{Any}: { -// event1{}: {state2{}, ...}, -// }, -// state1{}: { -// event4{Any, Var("x")}: {state3{Var("x")}, ...}, -// }, -// } +// Pattern{ +// state3{Any}: { +// event1{}: {state2{}, ...}, +// }, +// state1{}: { +// event4{Any, Var("x")}: {state3{Var("x")}, ...}, +// }, +// } // // is expanded to: // -// Pattern{ -// state3{False}: { -// event1{}: {state2{}, ...}, -// }, -// state3{True}: { -// event1{}: {state2{}, ...}, -// }, -// state1{}: { -// event4{False, False}: {state3{False}, ...}, -// event4{False, True}: {state3{True}, ...}, -// event4{True, False}: {state3{False}, ...}, -// event4{True, True}: {state3{True}, ...}, -// }, -// } -// +// Pattern{ +// state3{False}: { +// event1{}: {state2{}, ...}, +// }, +// state3{True}: { +// event1{}: {state2{}, ...}, +// }, +// state1{}: { +// event4{False, False}: {state3{False}, ...}, +// event4{False, True}: {state3{True}, ...}, +// event4{True, False}: {state3{False}, ...}, +// event4{True, True}: {state3{True}, ...}, +// }, +// } func expandPattern(p Pattern) Pattern { xp := make(Pattern) for s, sm := range p { diff --git a/pkg/util/fuzzystrmatch/soundex.go b/pkg/util/fuzzystrmatch/soundex.go index 0161b3bf92ff..dcf3b5549df1 100644 --- a/pkg/util/fuzzystrmatch/soundex.go +++ b/pkg/util/fuzzystrmatch/soundex.go @@ -18,7 +18,7 @@ import ( // The soundex code consists of four characters. const soundexLen = 4 -// ABCDEFGHIJKLMNOPQRSTUVWXYZ +// ABCDEFGHIJKLMNOPQRSTUVWXYZ const soundexTable = "01230120022455012623010202" func isAlpha(r rune) bool { diff --git a/pkg/util/goschedstats/BUILD.bazel b/pkg/util/goschedstats/BUILD.bazel index 474815d92ae1..83c8fc3bc62f 100644 --- a/pkg/util/goschedstats/BUILD.bazel +++ b/pkg/util/goschedstats/BUILD.bazel @@ -5,7 +5,7 @@ go_library( name = "goschedstats", srcs = [ "runnable.go", - "runtime_go1.18.go", + "runtime_go1.19.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/util/goschedstats", visibility = ["//visibility:public"], diff --git a/pkg/util/goschedstats/runtime_go1.18.go b/pkg/util/goschedstats/runtime_go1.19.go similarity index 98% rename from pkg/util/goschedstats/runtime_go1.18.go rename to pkg/util/goschedstats/runtime_go1.19.go index e8724d81cac2..90d9b345910a 100644 --- a/pkg/util/goschedstats/runtime_go1.18.go +++ b/pkg/util/goschedstats/runtime_go1.19.go @@ -8,12 +8,12 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. // -// The structure definitions in this file have been cross-checked against go1.18. +// The structure definitions in this file have been cross-checked against go1.19. // Before allowing newer versions, please check that the structures // still match with those in go/src/runtime. -//go:build gc && go1.18 && !go1.19 -// +build gc,go1.18,!go1.19 +//go:build gc && go1.19 && !go1.20 +// +build gc,go1.19,!go1.20 package goschedstats diff --git a/pkg/util/grunning/enabled_test.go b/pkg/util/grunning/enabled_test.go index 649a6f202236..a3b864bc0738 100644 --- a/pkg/util/grunning/enabled_test.go +++ b/pkg/util/grunning/enabled_test.go @@ -188,11 +188,11 @@ func TestPingPongHog(t *testing.T) { // BenchmarkGRunningTime measures how costly it is to read the current // goroutine's running time. 
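The soundexTable above drives the classic four-character code. The following simplified sketch shows how: keep the first letter, encode the rest through the table, drop zeros and adjacent duplicates, then pad or truncate to four characters. It ignores some traditional refinements (such as the H/W separator rule), so it is illustrative rather than a drop-in for the package's implementation.

    package main

    import (
        "fmt"
        "strings"
    )

    // soundexTable maps 'A'..'Z' to soundex digits, as in the diff above.
    const soundexTable = "01230120022455012623010202"

    // simpleSoundex computes a simplified four-character soundex code.
    func simpleSoundex(s string) string {
        var letters []rune
        for _, r := range strings.ToUpper(s) {
            if r >= 'A' && r <= 'Z' {
                letters = append(letters, r)
            }
        }
        if len(letters) == 0 {
            return ""
        }
        code := []byte{byte(letters[0])}
        prev := soundexTable[letters[0]-'A']
        for _, r := range letters[1:] {
            d := soundexTable[r-'A']
            if d != '0' && d != prev {
                code = append(code, d)
            }
            prev = d
        }
        for len(code) < 4 {
            code = append(code, '0')
        }
        return string(code[:4])
    }

    func main() {
        fmt.Println(simpleSoundex("Robert")) // R163
        fmt.Println(simpleSoundex("Ann"))    // A500
    }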
Results: // -// goos: linux -// goarch: amd64 -// cpu: Intel(R) Xeon(R) CPU @ 2.20GHz -// BenchmarkGRunningTime -// BenchmarkGRunningTime-24 38336452 31.59 ns/op +// goos: linux +// goarch: amd64 +// cpu: Intel(R) Xeon(R) CPU @ 2.20GHz +// BenchmarkGRunningTime +// BenchmarkGRunningTime-24 38336452 31.59 ns/op func BenchmarkGRunningTime(b *testing.B) { for n := 0; n < b.N; n++ { _ = grunning.Time() diff --git a/pkg/util/hlc/doc.go b/pkg/util/hlc/doc.go index 733cc0cd75f6..2326c7db4adb 100644 --- a/pkg/util/hlc/doc.go +++ b/pkg/util/hlc/doc.go @@ -20,7 +20,7 @@ logical time is based on Lamport’s clocks. Hybrid-logical clocks provide a few important properties: -Causality tracking +# Causality tracking HLCs provide causality tracking through a combination of a physical and logical (to break ties) component upon each inter-node exchange. Nodes attach HLC @@ -30,28 +30,28 @@ message that they receive to update their local clock. There are currently three channels through which HLC timestamps are passed between nodes in a cluster: - - Raft (unidirectional): proposers of Raft commands (i.e. leaseholders) attach - clock readings to some of these command (e.g. lease transfers, range merges), - which are later consumed by followers when commands are applied to their Raft - state machine. + - Raft (unidirectional): proposers of Raft commands (i.e. leaseholders) attach + clock readings to some of these command (e.g. lease transfers, range merges), + which are later consumed by followers when commands are applied to their Raft + state machine. - Ref: (roachpb.Lease).Start. - Ref: (roachpb.MergeTrigger).FreezeStart. + Ref: (roachpb.Lease).Start. + Ref: (roachpb.MergeTrigger).FreezeStart. - - BatchRequest API (bidirectional): clients and servers of the KV BatchRequest - API will attach HLC clock readings on requests and responses (successes and - errors). + - BatchRequest API (bidirectional): clients and servers of the KV BatchRequest + API will attach HLC clock readings on requests and responses (successes and + errors). - Ref: (roachpb.Header).Timestamp. - Ref: (roachpb.BatchResponse_Header).Now. - Ref: (roachpb.Error).Now. + Ref: (roachpb.Header).Timestamp. + Ref: (roachpb.BatchResponse_Header).Now. + Ref: (roachpb.Error).Now. - - DistSQL flows (unidirectional): leaves of a DistSQL flow will pass clock - readings back to the root of the flow. Currently, this only takes place on - errors, and relates to the "Transaction retry errors" interaction detailed - below. + - DistSQL flows (unidirectional): leaves of a DistSQL flow will pass clock + readings back to the root of the flow. Currently, this only takes place on + errors, and relates to the "Transaction retry errors" interaction detailed + below. - Ref: (roachpb.Error).Now. + Ref: (roachpb.Error).Now. Capturing causal relationships between events on different nodes is critical for enforcing invariants within CockroachDB. What follows is an enumeration of each @@ -77,72 +77,72 @@ local timestamp. Since the commit status and timestamp are non-local properties, a range may contain committed values (as unresolved intents) that turn out to exist in the future of the local HLC when the intent gets resolved. - - Cooperative lease transfers (Raft channel). During a cooperative lease - transfer from one replica of a range to another, the outgoing leaseholder - revokes its lease before its expiration time and consults its clock to - determine the start time of the next lease. It then proposes this new lease - through Raft (see the raft channel above). 
Upon application of this Raft - entry, the incoming leaseholder forwards its HLC to the start time of the - lease, ensuring that its clock is >= the new lease's start time. - - The invariant that a leaseholder's clock is always >= its lease's start time - is used in a few places. First, it ensures that the leaseholder's clock - always leads the local_timestamp of any value in its keyspace written by a - prior leaseholder on its range, which is an important property for the - correctness of observed timestamps. Second, it ensures that the leaseholder - immediately views itself as the leaseholder. Third, it ensures that if the - new leaseholder was to transfer the lease away at some point in the future, - this later lease's start time could be pulled from the local clock and be - guaranteed to receive an even greater starting timestamp. - - - Range merges (Raft + BatchRequest channels). During a merge of two ranges, - the right-hand side of the merge passes a "frozen timestamp" clock reading - from the right-hand side leaseholder, through the merge transaction - coordinator, all the way to the left-hand side's leaseholder. This timestamp - is captured after the right-hand side has been subsumed and has stopped - serving KV traffic. When the left-hand side's leaseholder applies the range - merge and officially takes control of the combined range, it forwards its HLC - to this frozen timestamp. Like the previous interaction, this one is also - necessary to ensure that the leaseholder of the joint range has a clock that - leads the local_timestamp of any value in its keyspace, even one written - originally on the right-hand side range. - - - Observed timestamps (Raft + BatchRequest channels). During the lifetime of a - transaction, its coordinator issues BatchRequests to other nodes in the - cluster. Each time a given transaction visits a node for the first time, it - captures an observation from the node's HLC. Separately, when a leaseholder - on a given node serves a write, it assigns the write a local_timestamp from - its node's HLC clock. This local_timestamp is retained even if an intent is - moved to a higher timestamp if it is asynchronously resolved. As a result, - these "observed timestamps" captured during the lifetime of a transaction can - be used to make a claim about values that could not have been written yet at - the time that the transaction first visited the node, and by extension, at - the time that the transaction began. This allows the transaction to avoid - uncertainty restarts in some circumstances. - - A variant of this same mechanism applies to non-transactional requests that - defer their timestamp allocation to the leaseholder of their (single) range. - These requests do not collect observed timestamps directly, but they do - establish an uncertainty interval immediately upon receipt by their target - leaseholder, using a clock reading from the leaseholder's local HLC as the - local limit and this clock reading + the cluster's maximum clock skew as the - global limit. This limit can be used to make claims about values that could - not have been written yet at the time that the non-transaction request first - reached the leaseholder node. - - For more, see pkg/kv/kvserver/uncertainty/doc.go. - - - Transaction retry errors (BatchRequest and DistSQL channels). - TODO(nvanbenschoten/andreimatei): is this a real case where passing a remote - clock signal through the local clock is necessary? 
The DistSQL channel and - its introduction in 72fa944 seem to imply that it is, but I don't really see - it, because we don't use the local clock to determine which timestamp to - restart the transaction at. Maybe we were just concerned about a transaction - restarting at a timestamp above the local clock back then because we had yet - to separate the "clock timestamp" domain from the "transaction timestamp" - domain. - -Strict monotonicity + - Cooperative lease transfers (Raft channel). During a cooperative lease + transfer from one replica of a range to another, the outgoing leaseholder + revokes its lease before its expiration time and consults its clock to + determine the start time of the next lease. It then proposes this new lease + through Raft (see the raft channel above). Upon application of this Raft + entry, the incoming leaseholder forwards its HLC to the start time of the + lease, ensuring that its clock is >= the new lease's start time. + + The invariant that a leaseholder's clock is always >= its lease's start time + is used in a few places. First, it ensures that the leaseholder's clock + always leads the local_timestamp of any value in its keyspace written by a + prior leaseholder on its range, which is an important property for the + correctness of observed timestamps. Second, it ensures that the leaseholder + immediately views itself as the leaseholder. Third, it ensures that if the + new leaseholder was to transfer the lease away at some point in the future, + this later lease's start time could be pulled from the local clock and be + guaranteed to receive an even greater starting timestamp. + + - Range merges (Raft + BatchRequest channels). During a merge of two ranges, + the right-hand side of the merge passes a "frozen timestamp" clock reading + from the right-hand side leaseholder, through the merge transaction + coordinator, all the way to the left-hand side's leaseholder. This timestamp + is captured after the right-hand side has been subsumed and has stopped + serving KV traffic. When the left-hand side's leaseholder applies the range + merge and officially takes control of the combined range, it forwards its HLC + to this frozen timestamp. Like the previous interaction, this one is also + necessary to ensure that the leaseholder of the joint range has a clock that + leads the local_timestamp of any value in its keyspace, even one written + originally on the right-hand side range. + + - Observed timestamps (Raft + BatchRequest channels). During the lifetime of a + transaction, its coordinator issues BatchRequests to other nodes in the + cluster. Each time a given transaction visits a node for the first time, it + captures an observation from the node's HLC. Separately, when a leaseholder + on a given node serves a write, it assigns the write a local_timestamp from + its node's HLC clock. This local_timestamp is retained even if an intent is + moved to a higher timestamp if it is asynchronously resolved. As a result, + these "observed timestamps" captured during the lifetime of a transaction can + be used to make a claim about values that could not have been written yet at + the time that the transaction first visited the node, and by extension, at + the time that the transaction began. This allows the transaction to avoid + uncertainty restarts in some circumstances. + + A variant of this same mechanism applies to non-transactional requests that + defer their timestamp allocation to the leaseholder of their (single) range. 
+ These requests do not collect observed timestamps directly, but they do + establish an uncertainty interval immediately upon receipt by their target + leaseholder, using a clock reading from the leaseholder's local HLC as the + local limit and this clock reading + the cluster's maximum clock skew as the + global limit. This limit can be used to make claims about values that could + not have been written yet at the time that the non-transaction request first + reached the leaseholder node. + + For more, see pkg/kv/kvserver/uncertainty/doc.go. + + - Transaction retry errors (BatchRequest and DistSQL channels). + TODO(nvanbenschoten/andreimatei): is this a real case where passing a remote + clock signal through the local clock is necessary? The DistSQL channel and + its introduction in 72fa944 seem to imply that it is, but I don't really see + it, because we don't use the local clock to determine which timestamp to + restart the transaction at. Maybe we were just concerned about a transaction + restarting at a timestamp above the local clock back then because we had yet + to separate the "clock timestamp" domain from the "transaction timestamp" + domain. + +# Strict monotonicity HLCs, as implemented by CockroachDB, provide strict monotonicity within and across restarts on a single node. Within a continuous process, providing this @@ -163,7 +163,7 @@ originate from the same node. Strictly monotonic timestamp allocation underpins the causality tracking uses detailed above. -Self-stabilization +# Self-stabilization HLCs provide self-stabilization in the presence of isolated transient clock skew fluctuations. As stated above, a node forwards its HLC upon its receipt of a @@ -172,7 +172,7 @@ communication, HLCs across nodes tend to converge and stabilize even if their individual physical clocks diverge. This provides no strong guarantees but can mask clock synchronization errors in practice. -Bounded skew +# Bounded skew HLCs within a CockroachDB deployment are configured with a maximum allowable offset between their physical time component and that of other HLCs in the @@ -187,110 +187,110 @@ a discussion for each about the consequences of that assumption being broken and the maximum clock skew between two nodes in the cluster exceeding the configured limit. - - Transaction uncertainty intervals. The single-key linearizability property is - satisfied in CockroachDB by tracking an uncertainty interval for each - transaction, within which the real-time ordering between two transactions is - indeterminate. Upon its creation, a transaction is given a provisional commit - timestamp commit_ts from the transaction coordinator’s local HLC and an - uncertainty interval of [commit_ts, commit_ts + max_offset]. - - When a transaction encounters a value on a key at a timestamp below its - provisional commit timestamp, it trivially observes the value during reads - and overwrites the value at a higher timestamp during writes. This alone - would satisfy single-key linearizability if transactions had access to a - perfectly synchronized global clock. - - Without global synchronization, the uncertainty interval is needed because it - is possible for a transaction to receive a provisional commit timestamp up to - the cluster’s max_offset earlier than a transaction that causally preceded - this new transaction in real time. 
When a transaction encounters a value on a - key at a timestamp above its provisional commit timestamp but within its - uncertainty interval, it performs an uncertainty restart, moving its - provisional commit timestamp above the uncertain value but keeping the upper - bound of its uncertainty interval fixed. - - This corresponds to treating all values in a transaction’s uncertainty window - as past writes. As a result, the operations on each key performed by - transactions take place in an order consistent with the real time ordering of - those transactions. - - HAZARD: If the maximum clock offset is exceeded, it is possible for a - transaction to serve a stale read that violates single-key linearizability. - For example, it is possible for a transaction A to write to a key and commit - at a timestamp t1, then for its client to hear about the commit. The client - may then initiate a second transaction B on a different gateway that has a - slow clock. If this slow clock is more than max_offset from other clocks in - the system, it is possible for transaction B's uncertainty interval not to - extend up to t1 and therefore for a read of the key that transaction A wrote - to be missed. Notably, this is a violation of consistency (linearizability) - but not of isolation (serializability) — transaction isolation has no clock - dependence. - - - Non-cooperative lease transfers. In the happy case, range leases move from - replica to replica using a coordinated handoff. However, in the unhappy case - where a leaseholder crashes or otherwise becomes unresponsive, other replicas - are able to attempt to acquire a new lease for the range as soon as they - observe the old lease expire. In this case, the max_offset plays a role in - ensuring that two replicas do not both consider themselves the leaseholder - for a range at the same (wallclock) time. This is ensured by designating a - "stasis" period equal in size to the max_offset at the end of each lease, - immediately before its expiration, as unusable. By preventing a lease from - being used within this stasis period, two replicas will never think that they - hold a valid lease at the same time, even if the outgoing leaseholder has a - slow clock and the incoming leaseholder has a fast clock (within bounds). For - more, see LeaseState_UNUSABLE. - - Note however that it is easy to overstate the salient point here if one is - not careful. Lease start and end times operate in the MVCC time domain, and - any two leases are always guaranteed to cover disjoint intervals of MVCC - time. Leases entitle their holder to serve reads at any MVCC time below their - expiration and to serve writes at any MVCC time at or after their start time - and below their expiration. Additionally, the lease sequence is attached to - all writes and checked during Raft application, so a stale leaseholder is - unable to perform a write after it has been replaced (in "consensus time"). - This combines to mean that even if two replicas believed that they hold the - lease for a range at the same time, they can not perform operations that - would be incompatible with one another (e.g. two conflicting writes). Again, - transaction isolation has no clock dependence. - - HAZARD: If the maximum clock offset is exceeded, it is possible for two - replicas to both consider themselves leaseholders at the same time. 
This can - not lead to stale reads for transactional requests, because a transaction - with an uncertainty interval that extends past a lease's expiration will not - be able to use that lease to perform a read (which is enforced by a stasis - period immediately before its expiration). However, because some - non-transactional requests receive their timestamp on the server and do not - carry an uncertainty interval, they would be susceptible to stale reads - during this period. This is equivalent to the hazard for operations that do - use uncertainty intervals, but the mechanics differ slightly. - - - "Linearizable" transactions. By default, transactions in CockroachDB provide - single-key linearizability and guarantee that as long as clock skew remains - below the configured bounds, transactions will not serve stale reads. - However, by default, transactions do not provide strict serializability, as - they are susceptible to the "causal reverse" anomaly. - - However, CockroachDB does supports a stricter model of consistency through - its COCKROACH_EXPERIMENTAL_LINEARIZABLE environment variable. When in - "linearizable" mode (also known as "strict serializable" mode), all writing - transactions (but not read-only transactions) must wait ("commit-wait") an - additional max_offset after committing to ensure that their commit timestamp - is below the current HLC clock time of any other node in the system. In doing - so, all causally dependent transactions are guaranteed to start with higher - timestamps, regardless of the gateway they use. This ensures that all - causally dependent transactions commit with higher timestamps, even if their - read and writes sets do not conflict with the original transaction's. This - prevents the "causal reverse" anomaly which can be observed by a third, - concurrent transaction. - - HAZARD: If the maximum clock offset is exceeded, it is possible that even - after a transaction commit-waits for the full max_offset, a causally - dependent transaction that evaluates on a different gateway node receives and - commits with an earlier timestamp. This resuscitates the possibility of the - causal reverse anomaly, along with the possibility for stale reads, as - detailed above. - - HAZARD: This mode of operation is completely untested. + - Transaction uncertainty intervals. The single-key linearizability property is + satisfied in CockroachDB by tracking an uncertainty interval for each + transaction, within which the real-time ordering between two transactions is + indeterminate. Upon its creation, a transaction is given a provisional commit + timestamp commit_ts from the transaction coordinator’s local HLC and an + uncertainty interval of [commit_ts, commit_ts + max_offset]. + + When a transaction encounters a value on a key at a timestamp below its + provisional commit timestamp, it trivially observes the value during reads + and overwrites the value at a higher timestamp during writes. This alone + would satisfy single-key linearizability if transactions had access to a + perfectly synchronized global clock. + + Without global synchronization, the uncertainty interval is needed because it + is possible for a transaction to receive a provisional commit timestamp up to + the cluster’s max_offset earlier than a transaction that causally preceded + this new transaction in real time. 
When a transaction encounters a value on a + key at a timestamp above its provisional commit timestamp but within its + uncertainty interval, it performs an uncertainty restart, moving its + provisional commit timestamp above the uncertain value but keeping the upper + bound of its uncertainty interval fixed. + + This corresponds to treating all values in a transaction’s uncertainty window + as past writes. As a result, the operations on each key performed by + transactions take place in an order consistent with the real time ordering of + those transactions. + + HAZARD: If the maximum clock offset is exceeded, it is possible for a + transaction to serve a stale read that violates single-key linearizability. + For example, it is possible for a transaction A to write to a key and commit + at a timestamp t1, then for its client to hear about the commit. The client + may then initiate a second transaction B on a different gateway that has a + slow clock. If this slow clock is more than max_offset from other clocks in + the system, it is possible for transaction B's uncertainty interval not to + extend up to t1 and therefore for a read of the key that transaction A wrote + to be missed. Notably, this is a violation of consistency (linearizability) + but not of isolation (serializability) — transaction isolation has no clock + dependence. + + - Non-cooperative lease transfers. In the happy case, range leases move from + replica to replica using a coordinated handoff. However, in the unhappy case + where a leaseholder crashes or otherwise becomes unresponsive, other replicas + are able to attempt to acquire a new lease for the range as soon as they + observe the old lease expire. In this case, the max_offset plays a role in + ensuring that two replicas do not both consider themselves the leaseholder + for a range at the same (wallclock) time. This is ensured by designating a + "stasis" period equal in size to the max_offset at the end of each lease, + immediately before its expiration, as unusable. By preventing a lease from + being used within this stasis period, two replicas will never think that they + hold a valid lease at the same time, even if the outgoing leaseholder has a + slow clock and the incoming leaseholder has a fast clock (within bounds). For + more, see LeaseState_UNUSABLE. + + Note however that it is easy to overstate the salient point here if one is + not careful. Lease start and end times operate in the MVCC time domain, and + any two leases are always guaranteed to cover disjoint intervals of MVCC + time. Leases entitle their holder to serve reads at any MVCC time below their + expiration and to serve writes at any MVCC time at or after their start time + and below their expiration. Additionally, the lease sequence is attached to + all writes and checked during Raft application, so a stale leaseholder is + unable to perform a write after it has been replaced (in "consensus time"). + This combines to mean that even if two replicas believed that they hold the + lease for a range at the same time, they can not perform operations that + would be incompatible with one another (e.g. two conflicting writes). Again, + transaction isolation has no clock dependence. + + HAZARD: If the maximum clock offset is exceeded, it is possible for two + replicas to both consider themselves leaseholders at the same time. 
This can + not lead to stale reads for transactional requests, because a transaction + with an uncertainty interval that extends past a lease's expiration will not + be able to use that lease to perform a read (which is enforced by a stasis + period immediately before its expiration). However, because some + non-transactional requests receive their timestamp on the server and do not + carry an uncertainty interval, they would be susceptible to stale reads + during this period. This is equivalent to the hazard for operations that do + use uncertainty intervals, but the mechanics differ slightly. + + - "Linearizable" transactions. By default, transactions in CockroachDB provide + single-key linearizability and guarantee that as long as clock skew remains + below the configured bounds, transactions will not serve stale reads. + However, by default, transactions do not provide strict serializability, as + they are susceptible to the "causal reverse" anomaly. + + However, CockroachDB does supports a stricter model of consistency through + its COCKROACH_EXPERIMENTAL_LINEARIZABLE environment variable. When in + "linearizable" mode (also known as "strict serializable" mode), all writing + transactions (but not read-only transactions) must wait ("commit-wait") an + additional max_offset after committing to ensure that their commit timestamp + is below the current HLC clock time of any other node in the system. In doing + so, all causally dependent transactions are guaranteed to start with higher + timestamps, regardless of the gateway they use. This ensures that all + causally dependent transactions commit with higher timestamps, even if their + read and writes sets do not conflict with the original transaction's. This + prevents the "causal reverse" anomaly which can be observed by a third, + concurrent transaction. + + HAZARD: If the maximum clock offset is exceeded, it is possible that even + after a transaction commit-waits for the full max_offset, a causally + dependent transaction that evaluates on a different gateway node receives and + commits with an earlier timestamp. This resuscitates the possibility of the + causal reverse anomaly, along with the possibility for stale reads, as + detailed above. + + HAZARD: This mode of operation is completely untested. To reduce the likelihood of stale reads, nodes periodically measure their clock’s offset from other nodes. If any node exceeds the configured maximum diff --git a/pkg/util/hlc/hlc.go b/pkg/util/hlc/hlc.go index ba6e092aaa56..ea981adddd99 100644 --- a/pkg/util/hlc/hlc.go +++ b/pkg/util/hlc/hlc.go @@ -634,12 +634,11 @@ func DecimalToHLC(d *apd.Decimal) (Timestamp, error) { // // This function is used to parse: // -// 1580361670629466905.0000000001 +// 1580361670629466905.0000000001 // // hlc.ParseTimestamp() would be used to parse: // -// 1580361670.629466905,1 -// +// 1580361670.629466905,1 func ParseHLC(s string) (Timestamp, error) { dec, _, err := apd.NewFromString(s) if err != nil { diff --git a/pkg/util/httputil/handlers.go b/pkg/util/httputil/handlers.go index 666f0a10e626..4fa9f85a4e1c 100644 --- a/pkg/util/httputil/handlers.go +++ b/pkg/util/httputil/handlers.go @@ -20,9 +20,9 @@ import "net/http" // served and an HTTP 304 with no body is sent to clients instead, to indicate // that the client's stale cache entry is still valid. 
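The commit-wait rule from the "Linearizable" transactions discussion above amounts to sleeping until the local clock has passed the commit timestamp by max_offset, at which point every other node's clock (within the skew bound) has also passed the commit timestamp. A toy sketch, with a plain time.Time standing in for an HLC reading and all names invented:

    package main

    import (
        "fmt"
        "time"
    )

    // commitWait blocks until now() exceeds commitTS + maxOffset, so that the
    // client only learns of the commit once no node in the cluster could still
    // assign a timestamp below commitTS.
    func commitWait(now func() time.Time, commitTS time.Time, maxOffset time.Duration) {
        deadline := commitTS.Add(maxOffset)
        if wait := deadline.Sub(now()); wait > 0 {
            time.Sleep(wait)
        }
    }

    func main() {
        start := time.Now()
        commitWait(time.Now, start, 10*time.Millisecond)
        fmt.Println("waited at least", time.Since(start))
    }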
// -// - contentHashes is a map of URL path (including a leading "/") to the ETag -// value to use for that file -// - next is the next handler in the http.Handler chain, used +// - contentHashes is a map of URL path (including a leading "/") to the ETag +// value to use for that file +// - next is the next handler in the http.Handler chain, used func EtagHandler(contentHashes map[string]string, next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if contentHashes == nil { diff --git a/pkg/util/humanizeutil/duration.go b/pkg/util/humanizeutil/duration.go index a1dd78e89603..897c1a393971 100644 --- a/pkg/util/humanizeutil/duration.go +++ b/pkg/util/humanizeutil/duration.go @@ -21,11 +21,11 @@ import ( // and the granularity is no smaller than microseconds. // // Examples: -// 0 -> "0µs" -// 123456ns -> "123µs" -// 12345678ns -> "12ms" -// 12345678912ns -> "1.2s" // +// 0 -> "0µs" +// 123456ns -> "123µs" +// 12345678ns -> "12ms" +// 12345678912ns -> "1.2s" func Duration(val time.Duration) redact.SafeString { val = val.Round(time.Microsecond) if val == 0 { @@ -54,12 +54,12 @@ func Duration(val time.Duration) redact.SafeString { // the granularity is no smaller than seconds. // // Examples: -// - 0 seconds -// - 1 second -// - 3 minutes -// - 1 hour -// - 5 days -// - 1000 days +// - 0 seconds +// - 1 second +// - 3 minutes +// - 1 hour +// - 5 days +// - 1000 days func LongDuration(val time.Duration) redact.SafeString { var round time.Duration var unit string diff --git a/pkg/util/interval/btree_based_interval.go b/pkg/util/interval/btree_based_interval.go index e8e99c4f9008..a60fad9b8ccf 100644 --- a/pkg/util/interval/btree_based_interval.go +++ b/pkg/util/interval/btree_based_interval.go @@ -209,8 +209,8 @@ func (s *children) truncate(index int) { // node is an internal node in a tree. // // It must at all times maintain the invariant that either -// * len(children) == 0, len(interfaces) unconstrained -// * len(children) == len(interfaces) + 1 +// - len(children) == 0, len(interfaces) unconstrained +// - len(children) == len(interfaces) + 1 type node struct { // Range is the node range which covers all the ranges in the subtree rooted // at the node. Range.Start is the leftmost position. Range.End is the @@ -256,21 +256,21 @@ func (n *node) mutableChild(i int) *node { // this function returns the Interface that existed at that index and a new node // containing all interfaces/children after it. Before splitting: // -// +-----------+ -// | x y z | -// ---/-/-\-\--+ +// +-----------+ +// | x y z | +// ---/-/-\-\--+ // // After splitting: // -// +-----------+ -// | y | -// -----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// -----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | z | // +-----------+ +-----------+ -// func (n *node) split(i int, fast bool) (Interface, *node) { e := n.items[i] second := n.cow.newNode() @@ -634,15 +634,20 @@ func (n *node) removeFromLeaf(i int, fast bool) (out Interface, shrunk bool) { // actually remove it. // // Most documentation says we have to do two sets of special casing: -// 1) interface is in this node -// 2) interface is in child +// 1. interface is in this node +// 2. 
interface is in child +// // In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values +// +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// // For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge +// +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// // To simplify our code here, we handle cases #1 and #2 the same: // If a node doesn't have enough Interfaces, we make sure it does (using a,b,c). // We then simply redo our remove call, and the second time (regardless of @@ -666,32 +671,35 @@ func (n *node) growChildAndRemove( // Steal from left child. Before stealing: // -// +-----------+ -// | y | -// -----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// -----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | | // +----------\+ +-----------+ -// \ -// v -// a +// +// \ +// v +// a // // After stealing: // -// +-----------+ -// | x | -// -----/-\----+ -// / \ -// v v +// +-----------+ +// | x | +// -----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | | | y | // +-----------+ +/----------+ -// / -// v -// a // +// / +// v +// a func (n *node) stealFromLeftChild(i int, fast bool) { // steal stealTo := n.mutableChild(i) @@ -723,32 +731,35 @@ func (n *node) stealFromLeftChild(i int, fast bool) { // Steal from right child. Before stealing: // -// +-----------+ -// | y | -// -----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// -----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | | | x | // +---------- + +/----------+ -// / -// v -// a +// +// / +// v +// a // // After stealing: // -// +-----------+ -// | x | -// -----/-\----+ -// / \ -// v v +// +-----------+ +// | x | +// -----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | y | | | // +----------\+ +-----------+ -// \ -// v -// a // +// \ +// v +// a func (n *node) stealFromRightChild(i int, fast bool) { // steal stealTo := n.mutableChild(i) @@ -780,26 +791,26 @@ func (n *node) stealFromRightChild(i int, fast bool) { // Merge with right child. Before merging: // -// +-----------+ -// | u y v | -// -----/-\----+ -// / \ -// v v +// +-----------+ +// | u y v | +// -----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | z | // +---------- + +-----------+ // // After merging: // -// +-----------+ -// | u v | -// ------|-----+ -// | -// v -// +-----------+ -// | x y z | -// +---------- + -// +// +-----------+ +// | u v | +// ------|-----+ +// | +// v +// +-----------+ +// | x y z | +// +---------- + func (n *node) mergeWithRightChild(i int, fast bool) { // merge child := n.mutableChild(i) @@ -1107,13 +1118,14 @@ func (t *btree) Iterator() TreeIterator { // for use by the new one, instead of being lost to the garbage collector. // // This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. +// +// O(1): when addNodesToFreelist is false, this is a single operation. 
+// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. func (t *btree) ClearWithOpt(addNodesToFreelist bool) { if t.root != nil && addNodesToFreelist { t.root.reset(t.cow) diff --git a/pkg/util/interval/generic/doc.go b/pkg/util/interval/generic/doc.go index 3098883b8106..7f0aaab384a2 100644 --- a/pkg/util/interval/generic/doc.go +++ b/pkg/util/interval/generic/doc.go @@ -15,53 +15,53 @@ B-Tree. The package uses code generation to create type-safe, zero-allocation specializations of the ordered tree structure. -Usage +# Usage Users of the package should follow these steps: - 1. Define a type that will be used to parameterize the generic tree structure. - 2. Ensure that the parameter type fulfills the type contract defined in - internal/contract.go. - 3. Include a go generate declaration that invokes the gen.sh script with the - type name as the first argument and the package name as the second argument. - 4. Invoke go generate. + 1. Define a type that will be used to parameterize the generic tree structure. + 2. Ensure that the parameter type fulfills the type contract defined in + internal/contract.go. + 3. Include a go generate declaration that invokes the gen.sh script with the + type name as the first argument and the package name as the second argument. + 4. Invoke go generate. -Example +# Example 1. The latch type is defined: - type latch struct { - id uint64 - span roachpb.Span - ts hlc.Timestamp - done *signal - next, prev *latch // readSet linked-list. - } + type latch struct { + id uint64 + span roachpb.Span + ts hlc.Timestamp + done *signal + next, prev *latch // readSet linked-list. + } 2. Methods are defined to fulfill the type contract. - func (la *latch) ID() uint64 { return la.id } - func (la *latch) Key() []byte { return la.span.Key } - func (la *latch) EndKey() []byte { return la.span.EndKey } - func (la *latch) String() string { return fmt.Sprintf("%s@%s", la.span, la.ts) } - func (la *latch) SetID(v uint64) { la.id = v } - func (la *latch) SetKey(v []byte) { la.span.Key = v } - func (la *latch) SetEndKey(v []byte) { la.span.EndKey = v } + func (la *latch) ID() uint64 { return la.id } + func (la *latch) Key() []byte { return la.span.Key } + func (la *latch) EndKey() []byte { return la.span.EndKey } + func (la *latch) String() string { return fmt.Sprintf("%s@%s", la.span, la.ts) } + func (la *latch) SetID(v uint64) { la.id = v } + func (la *latch) SetKey(v []byte) { la.span.Key = v } + func (la *latch) SetEndKey(v []byte) { la.span.EndKey = v } 3. The following comment is added near the declaration of the latch type: - //go:generate ../../util/interval/generic/gen.sh *latch spanlatch + //go:generate ../../util/interval/generic/gen.sh *latch spanlatch 4. Invoking go generate results in the creation of the following files: - * latch_interval_btree.go - * latch_interval_btree_test.go + - latch_interval_btree.go + - latch_interval_btree_test.go -Working Example +# Working Example See example_t.go for a working example. 
Running go generate on this package generates: - * example_interval_btree.go - * example_interval_btree_test.go + - example_interval_btree.go + - example_interval_btree_test.go */ package generic diff --git a/pkg/util/interval/generic/example_interval_btree.go b/pkg/util/interval/generic/example_interval_btree.go index 5f803ef5c9a0..b340d6a14533 100644 --- a/pkg/util/interval/generic/example_interval_btree.go +++ b/pkg/util/interval/generic/example_interval_btree.go @@ -32,17 +32,20 @@ const ( // cmp returns a value indicating the sort order relationship between // a and b. The comparison is performed lexicographically on -// (a.Key(), a.EndKey(), a.ID()) +// +// (a.Key(), a.EndKey(), a.ID()) +// // and -// (b.Key(), b.EndKey(), b.ID()) +// +// (b.Key(), b.EndKey(), b.ID()) +// // tuples. // // Given c = cmp(a, b): // -// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) -// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) -// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) -// +// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) +// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) +// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) func cmp(a, b *example) int { c := bytes.Compare(a.Key(), b.Key()) if c != 0 { @@ -325,21 +328,21 @@ func (n *node) find(item *example) (index int, found bool) { // // Before: // -// +-----------+ -// | x y z | -// +--/-/-\-\--+ +// +-----------+ +// | x y z | +// +--/-/-\-\--+ // // After: // -// +-----------+ -// | y | -// +----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// +----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | z | // +-----------+ +-----------+ -// func (n *node) split(i int) (*example, *node) { out := n.items[i] var next *node @@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *example { // is to minimize the number of key comparisons performed in total. The // algorithm operates based on the following two invariants maintained by // augmented interval btree: -// 1. all items are sorted in the btree based on their start key. -// 2. all btree nodes maintain the upper bound end key of all items -// in their subtree. +// 1. all items are sorted in the btree based on their start key. +// 2. all btree nodes maintain the upper bound end key of all items +// in their subtree. // // The scan algorithm starts in "unconstrained minimum" and "unconstrained // maximum" states. To enter a "constrained minimum" state, the scan must reach @@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *example { // // The scan algorithm works like a standard btree forward scan with the // following augmentations: -// 1. before tranversing the tree, the scan performs a binary search on the -// root node's items to determine a "soft" lower-bound constraint position -// and a "hard" upper-bound constraint position in the root's children. -// 2. when tranversing into a child node in the lower or upper bound constraint -// position, the constraint is refined by searching the child's items. -// 3. the initial traversal down the tree follows the left-most children -// whose upper bound end keys are equal to or greater than the start key -// of the search range. The children followed will be equal to or less -// than the soft lower bound constraint. -// 4. 
once the initial tranversal completes and the scan is in the left-most -// btree node whose upper bound overlaps the search range, key comparisons -// must be performed with each item in the tree. This is necessary because -// any of these items may have end keys that cause them to overlap with the -// search range. -// 5. once the scan reaches the lower bound constraint position (the first item -// with a start key equal to or greater than the search range's start key), -// it can begin scaning without performing key comparisons. This is allowed -// because all items from this point forward will have end keys that are -// greater than the search range's start key. -// 6. once the scan reaches the upper bound constraint position, it terminates. -// It does so because the item at this position is the first item with a -// start key larger than the search range's end key. +// 1. before tranversing the tree, the scan performs a binary search on the +// root node's items to determine a "soft" lower-bound constraint position +// and a "hard" upper-bound constraint position in the root's children. +// 2. when tranversing into a child node in the lower or upper bound constraint +// position, the constraint is refined by searching the child's items. +// 3. the initial traversal down the tree follows the left-most children +// whose upper bound end keys are equal to or greater than the start key +// of the search range. The children followed will be equal to or less +// than the soft lower bound constraint. +// 4. once the initial tranversal completes and the scan is in the left-most +// btree node whose upper bound overlaps the search range, key comparisons +// must be performed with each item in the tree. This is necessary because +// any of these items may have end keys that cause them to overlap with the +// search range. +// 5. once the scan reaches the lower bound constraint position (the first item +// with a start key equal to or greater than the search range's start key), +// it can begin scaning without performing key comparisons. This is allowed +// because all items from this point forward will have end keys that are +// greater than the search range's start key. +// 6. once the scan reaches the upper bound constraint position, it terminates. +// It does so because the item at this position is the first item with a +// start key larger than the search range's end key. type overlapScan struct { // The "soft" lower-bound constraint. constrMinN *node diff --git a/pkg/util/interval/generic/internal/contract.go b/pkg/util/interval/generic/internal/contract.go index 0b9562c1eac3..550a5c56c307 100644 --- a/pkg/util/interval/generic/internal/contract.go +++ b/pkg/util/interval/generic/internal/contract.go @@ -11,6 +11,7 @@ package internal // T is a Template type. The methods in the interface make up its contract. +// //lint:ignore U1001 unused type T interface { ID() uint64 diff --git a/pkg/util/interval/generic/internal/interval_btree_tmpl.go b/pkg/util/interval/generic/internal/interval_btree_tmpl.go index e4db492dd1b1..365161e1ec2c 100644 --- a/pkg/util/interval/generic/internal/interval_btree_tmpl.go +++ b/pkg/util/interval/generic/internal/interval_btree_tmpl.go @@ -33,17 +33,20 @@ const ( // cmp returns a value indicating the sort order relationship between // a and b. The comparison is performed lexicographically on -// (a.Key(), a.EndKey(), a.ID()) +// +// (a.Key(), a.EndKey(), a.ID()) +// // and -// (b.Key(), b.EndKey(), b.ID()) +// +// (b.Key(), b.EndKey(), b.ID()) +// // tuples. 
// // Given c = cmp(a, b): // -// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) -// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) -// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) -// +// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID()) +// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID()) +// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID()) func cmp(a, b T) int { c := bytes.Compare(a.Key(), b.Key()) if c != 0 { @@ -326,21 +329,21 @@ func (n *node) find(item T) (index int, found bool) { // // Before: // -// +-----------+ -// | x y z | -// +--/-/-\-\--+ +// +-----------+ +// | x y z | +// +--/-/-\-\--+ // // After: // -// +-----------+ -// | y | -// +----/-\----+ -// / \ -// v v +// +-----------+ +// | y | +// +----/-\----+ +// / \ +// v v +// // +-----------+ +-----------+ // | x | | z | // +-----------+ +-----------+ -// func (n *node) split(i int) (T, *node) { out := n.items[i] var next *node @@ -1005,9 +1008,9 @@ func (i *iterator) Cur() T { // is to minimize the number of key comparisons performed in total. The // algorithm operates based on the following two invariants maintained by // augmented interval btree: -// 1. all items are sorted in the btree based on their start key. -// 2. all btree nodes maintain the upper bound end key of all items -// in their subtree. +// 1. all items are sorted in the btree based on their start key. +// 2. all btree nodes maintain the upper bound end key of all items +// in their subtree. // // The scan algorithm starts in "unconstrained minimum" and "unconstrained // maximum" states. To enter a "constrained minimum" state, the scan must reach @@ -1022,28 +1025,28 @@ func (i *iterator) Cur() T { // // The scan algorithm works like a standard btree forward scan with the // following augmentations: -// 1. before tranversing the tree, the scan performs a binary search on the -// root node's items to determine a "soft" lower-bound constraint position -// and a "hard" upper-bound constraint position in the root's children. -// 2. when tranversing into a child node in the lower or upper bound constraint -// position, the constraint is refined by searching the child's items. -// 3. the initial traversal down the tree follows the left-most children -// whose upper bound end keys are equal to or greater than the start key -// of the search range. The children followed will be equal to or less -// than the soft lower bound constraint. -// 4. once the initial tranversal completes and the scan is in the left-most -// btree node whose upper bound overlaps the search range, key comparisons -// must be performed with each item in the tree. This is necessary because -// any of these items may have end keys that cause them to overlap with the -// search range. -// 5. once the scan reaches the lower bound constraint position (the first item -// with a start key equal to or greater than the search range's start key), -// it can begin scaning without performing key comparisons. This is allowed -// because all items from this point forward will have end keys that are -// greater than the search range's start key. -// 6. once the scan reaches the upper bound constraint position, it terminates. -// It does so because the item at this position is the first item with a -// start key larger than the search range's end key. +// 1. 
before tranversing the tree, the scan performs a binary search on the +// root node's items to determine a "soft" lower-bound constraint position +// and a "hard" upper-bound constraint position in the root's children. +// 2. when tranversing into a child node in the lower or upper bound constraint +// position, the constraint is refined by searching the child's items. +// 3. the initial traversal down the tree follows the left-most children +// whose upper bound end keys are equal to or greater than the start key +// of the search range. The children followed will be equal to or less +// than the soft lower bound constraint. +// 4. once the initial tranversal completes and the scan is in the left-most +// btree node whose upper bound overlaps the search range, key comparisons +// must be performed with each item in the tree. This is necessary because +// any of these items may have end keys that cause them to overlap with the +// search range. +// 5. once the scan reaches the lower bound constraint position (the first item +// with a start key equal to or greater than the search range's start key), +// it can begin scaning without performing key comparisons. This is allowed +// because all items from this point forward will have end keys that are +// greater than the search range's start key. +// 6. once the scan reaches the upper bound constraint position, it terminates. +// It does so because the item at this position is the first item with a +// start key larger than the search range's end key. type overlapScan struct { // The "soft" lower-bound constraint. constrMinN *node diff --git a/pkg/util/interval/interval.go b/pkg/util/interval/interval.go index dfbf9aa50457..af34ac564b25 100644 --- a/pkg/util/interval/interval.go +++ b/pkg/util/interval/interval.go @@ -122,9 +122,9 @@ type Interface interface { // // Given c = Compare(a, b): // -// c == -1 if (a.Range().Start, a.ID()) < (b.Range().Start, b.ID()); -// c == 0 if (a.Range().Start, a.ID()) == (b.Range().Start, b.ID()); and -// c == 1 if (a.Range().Start, a.ID()) > (b.Range().Start, b.ID()). +// c == -1 if (a.Range().Start, a.ID()) < (b.Range().Start, b.ID()); +// c == 0 if (a.Range().Start, a.ID()) == (b.Range().Start, b.ID()); and +// c == 1 if (a.Range().Start, a.ID()) > (b.Range().Start, b.ID()). // // "c == 0" is equivalent to "Equal(a, b) == true". func Compare(a, b Interface) int { @@ -159,10 +159,10 @@ type Comparable []byte // receiver and the parameter. // // Given c = a.Compare(b): -// c == -1 if a < b; -// c == 0 if a == b; and -// c == 1 if a > b. // +// c == -1 if a < b; +// c == 0 if a == b; and +// c == 1 if a > b. func (c Comparable) Compare(o Comparable) int { return bytes.Compare(c, o) } diff --git a/pkg/util/ipaddr/ipaddr.go b/pkg/util/ipaddr/ipaddr.go index 0103a0a85208..bc78ec321575 100644 --- a/pkg/util/ipaddr/ipaddr.go +++ b/pkg/util/ipaddr/ipaddr.go @@ -34,12 +34,12 @@ type Addr uint128.Uint128 // IPAddr stores an IP address's family, IP, and host mask. This was chosen over // Go's "net" IP, as that struct doesn't work well for what we need to do. -// - It discards information when parsing IPv4, forcing it to be IPv6, and then -// assuming IPv4-mapped IPv6 addresses are purely IPv4 (only for printing). -// This is solved by having a Family field. -// - ParseIP and ParseCIDR are very strict, whereas postgres' INET and CIDR -// have very relaxed constraints for parsing an IP. -// - Doing int64 operations is much more efficient than byte slice operations. 
+// - It discards information when parsing IPv4, forcing it to be IPv6, and then +// assuming IPv4-mapped IPv6 addresses are purely IPv4 (only for printing). +// This is solved by having a Family field. +// - ParseIP and ParseCIDR are very strict, whereas postgres' INET and CIDR +// have very relaxed constraints for parsing an IP. +// - Doing int64 operations is much more efficient than byte slice operations. type IPAddr struct { // Family denotes what type of IP the original IP was. Family IPFamily diff --git a/pkg/util/iterutil/iterutil.go b/pkg/util/iterutil/iterutil.go index e65c0dab0f86..3936fe253fe9 100644 --- a/pkg/util/iterutil/iterutil.go +++ b/pkg/util/iterutil/iterutil.go @@ -19,20 +19,19 @@ var errStopIteration = errors.New("stop iteration") // This error should not be propagated further, i.e., if a closure returns // this error, the loop should break returning nil error. For example: // -// f := func(i int) error { -// if i == 10 { -// return iterutil.StopIteration() -// } -// return nil -// } -// -// for i := range slice { -// if err := f(i); err != nil { -// return iterutil.Map(err) -// } -// // continue when nil error -// } +// f := func(i int) error { +// if i == 10 { +// return iterutil.StopIteration() +// } +// return nil +// } // +// for i := range slice { +// if err := f(i); err != nil { +// return iterutil.Map(err) +// } +// // continue when nil error +// } func StopIteration() error { return errStopIteration } // Map the nil if it is StopIteration, or keep the error otherwise diff --git a/pkg/util/json/encode_test.go b/pkg/util/json/encode_test.go index 8b23586a1083..43ca3e5d605f 100644 --- a/pkg/util/json/encode_test.go +++ b/pkg/util/json/encode_test.go @@ -13,7 +13,6 @@ package json import ( "flag" "fmt" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -64,7 +63,7 @@ func TestJSONRandomEncodeRoundTrip(t *testing.T) { func TestFilesEncode(t *testing.T) { dir := testutils.TestDataPath(t, "raw") - dirContents, err := ioutil.ReadDir(dir) + dirContents, err := os.ReadDir(dir) if err != nil { t.Fatal(err) } diff --git a/pkg/util/log/ambient_context.go b/pkg/util/log/ambient_context.go index 24a60b797a50..eff0e235ad10 100644 --- a/pkg/util/log/ambient_context.go +++ b/pkg/util/log/ambient_context.go @@ -23,33 +23,34 @@ import ( // server components. // // Example: -// type SomeServer struct { -// log.AmbientContext -// ... -// } // -// ac := AmbientContext{Tracer: tracing.NewTracer()} -// ac.AddLogTag("n", 1) +// type SomeServer struct { +// log.AmbientContext +// ... +// } // -// s := &SomeServer{ -// AmbientContext: ac -// ... -// } +// ac := AmbientContext{Tracer: tracing.NewTracer()} +// ac.AddLogTag("n", 1) // -// // on an operation with context ctx -// ctx = s.AnnotateCtx(ctx) -// ... +// s := &SomeServer{ +// AmbientContext: ac +// ... +// } // -// // run a worker -// s.stopper.RunWorker(func() { -// ctx := s.AnnotateCtx(context.Background()) -// ... -// }) +// // on an operation with context ctx +// ctx = s.AnnotateCtx(ctx) +// ... // -// // start a background operation -// ctx, span := s.AnnotateCtxWithSpan(context.Background(), "some-op") -// defer span.Finish() -// ... +// // run a worker +// s.stopper.RunWorker(func() { +// ctx := s.AnnotateCtx(context.Background()) +// ... +// }) +// +// // start a background operation +// ctx, span := s.AnnotateCtxWithSpan(context.Background(), "some-op") +// defer span.Finish() +// ... type AmbientContext struct { // Tracer is used to open spans (see AnnotateCtxWithSpan). 
Tracer *tracing.Tracer @@ -98,11 +99,11 @@ func (ac *AmbientContext) refreshCache() { } // AnnotateCtx annotates a given context with the information in AmbientContext: -// - the EventLog is embedded in the context if the context doesn't already -// have an event log or an open trace. -// - the log tags in AmbientContext are added (if ctx doesn't already have -// them). If the tags already exist, the values from the AmbientContext -// overwrite the existing values, but the order of the tags might change. +// - the EventLog is embedded in the context if the context doesn't already +// have an event log or an open trace. +// - the log tags in AmbientContext are added (if ctx doesn't already have +// them). If the tags already exist, the values from the AmbientContext +// overwrite the existing values, but the order of the tags might change. // // For background operations, context.Background() should be passed; however, in // that case it is strongly recommended to open a span if possible (using diff --git a/pkg/util/log/buffered_sink.go b/pkg/util/log/buffered_sink.go index 5c9cf9a0d81c..707ed40341dc 100644 --- a/pkg/util/log/buffered_sink.go +++ b/pkg/util/log/buffered_sink.go @@ -85,9 +85,10 @@ type bufferedSink struct { // // |msg|msg|msg|msg|msg|msg|msg|msg|msg| // └----------------------^--------------┘ -// triggerSize maxBufferSize -// └--------------┘ -// sized-based flush is triggered when size falls in this range +// +// triggerSize maxBufferSize +// └--------------┘ +// sized-based flush is triggered when size falls in this range // // maxBufferSize should also be set such that it makes sense in relationship // with the flush latency: only one flush is ever in flight at a time, so the diff --git a/pkg/util/log/channel/channel_generated.go b/pkg/util/log/channel/channel_generated.go index fb3073be65f9..0e28927c6eaa 100644 --- a/pkg/util/log/channel/channel_generated.go +++ b/pkg/util/log/channel/channel_generated.go @@ -20,24 +20,24 @@ const DEV = logpb.Channel_DEV // OPS is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. 
+// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes const OPS = logpb.Channel_OPS // HEALTH is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability const HEALTH = logpb.Channel_HEALTH // STORAGE is used to report low-level storage @@ -48,9 +48,9 @@ const STORAGE = logpb.Channel_STORAGE // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -63,9 +63,9 @@ const SESSIONS = logpb.Channel_SESSIONS // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -74,10 +74,10 @@ const SQL_SCHEMA = logpb.Channel_SQL_SCHEMA // USER_ADMIN is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -86,8 +86,8 @@ const USER_ADMIN = logpb.Channel_USER_ADMIN // PRIVILEGES is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
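As an illustrative aside, the channel constants documented above are used through the
corresponding channel loggers (e.g. log.Ops for OPS). The following is a minimal
sketch, assuming the log.Ops.Infof and log.VEventf signatures shown elsewhere in
this change; the helper name and message text are hypothetical, not taken from the
codebase:

	package example

	import (
		"context"

		"github.com/cockroachdb/cockroach/pkg/util/log"
	)

	// reportNodeAdded logs a "point" operational event (a topology change)
	// on the OPS channel, and separately records extra detail to the active
	// trace and, at vmodule level >= 2, to the DEV channel.
	func reportNodeAdded(ctx context.Context, nodeID int32) {
		log.Ops.Infof(ctx, "node n%d added to the cluster", nodeID)
		log.VEventf(ctx, 2, "node n%d: topology change details follow", nodeID)
	}

The design point the sketch relies on is simply that choosing a channel is a matter
of calling through the corresponding channel logger; severity and verbosity are
orthogonal to the channel choice.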
@@ -96,10 +96,10 @@ const PRIVILEGES = logpb.Channel_PRIVILEGES // SENSITIVE_ACCESS is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -108,9 +108,9 @@ const SENSITIVE_ACCESS = logpb.Channel_SENSITIVE_ACCESS // SQL_EXEC is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. const SQL_EXEC = logpb.Channel_SQL_EXEC // SQL_PERF is used to report SQL executions diff --git a/pkg/util/log/doc.go b/pkg/util/log/doc.go index 784659c470e5..fcea1c3e16db 100644 --- a/pkg/util/log/doc.go +++ b/pkg/util/log/doc.go @@ -28,7 +28,7 @@ // Package log implements logging. // There are three logging styles: named, V-style, events. // -// Named Functions +// # Named Functions // // The functions Info, Warning, Error, and Fatal log their arguments at the // severity level. All include formatting variants like Infof. @@ -51,7 +51,7 @@ // For any given channel, users can filter out logging to a given // severity level or higher. // -// V-Style +// # V-Style // // The V functions can be used to selectively enable logging at a call // site. Invoking the binary with --vmodule=*=N will enable V functions @@ -67,11 +67,11 @@ // Additionally, severity functions also exist in a V variant for // convenience. For example: // -// log.Ops.VWarningf(ctx, 2, "attention!") +// log.Ops.VWarningf(ctx, 2, "attention!") // // aliases: if V(2) { log.Ops.Warningf(ctx, "attention!") } // -// Events +// # Events // // The Event functions log messages to an existing trace if one exists. The // VEvent functions logs the message to a trace and also the Dev channel based @@ -81,7 +81,7 @@ // // log.VEventf(ctx, 2, "client error; %s", err) // -// Output +// # Output // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. @@ -90,29 +90,28 @@ // This package provides several flags that modify this behavior. // These are provided via the util/log/logflags package; see InitFlags. // -// --logtostderr=LEVEL -// Logs are written to standard error as well as to files. -// Entries with severity below LEVEL are not written to stderr. -// "true" and "false" are also supported (everything / nothing). -// --log-dir="..." -// Log files will be written to this directory by the main logger -// instead of the default target directory. -// --log-file-verbosity=LEVEL -// Entries with severity below LEVEL are not written to the log file. -// "true" and "false" are also supported (everything / nothing). -// --log-file-max-size=N -// Log files are rotated after reaching that size. 
-// --log-group-max-size=N -// Log files are removed after the total size of all files generated -// by one logger reaches that size. +// --logtostderr=LEVEL +// Logs are written to standard error as well as to files. +// Entries with severity below LEVEL are not written to stderr. +// "true" and "false" are also supported (everything / nothing). +// --log-dir="..." +// Log files will be written to this directory by the main logger +// instead of the default target directory. +// --log-file-verbosity=LEVEL +// Entries with severity below LEVEL are not written to the log file. +// "true" and "false" are also supported (everything / nothing). +// --log-file-max-size=N +// Log files are rotated after reaching that size. +// --log-group-max-size=N +// Log files are removed after the total size of all files generated +// by one logger reaches that size. // // Other flags provide aids to debugging. // -// --vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// --vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// +// --vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// --vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". package log diff --git a/pkg/util/log/eventpb/doc.go b/pkg/util/log/eventpb/doc.go index 01d41fb20fec..110db19706f8 100644 --- a/pkg/util/log/eventpb/doc.go +++ b/pkg/util/log/eventpb/doc.go @@ -13,23 +13,23 @@ // // The consumers of structured events include: // -// - Instrusion Detection Systems in the CockroachCloud infrastructure. -// Check in with the security team before changing or evolving -// any payloads related to privilege changes, user management, -// authentication or other security-adjacent features. +// - Instrusion Detection Systems in the CockroachCloud infrastructure. +// Check in with the security team before changing or evolving +// any payloads related to privilege changes, user management, +// authentication or other security-adjacent features. // // - The DB Console (ex "Admin UI") code. See ui/util/events*.ts. // -// - Customers using automatic event triaging in network log collectors. -// Check in with the product team before changing or evolving -// the way event types are classified into categories or channels. -// Also check in with the product team before changing -// or evolving "Common" payload types shared across multiple event types. +// - Customers using automatic event triaging in network log collectors. +// Check in with the product team before changing or evolving +// the way event types are classified into categories or channels. +// Also check in with the product team before changing +// or evolving "Common" payload types shared across multiple event types. // -// - Documentation. These are published APIs. A reference doc is -// automatically generated (docs/generated/eventlog.md). Significant -// changes to the event documentation should be reviewed by -// the doc team. +// - Documentation. These are published APIs. A reference doc is +// automatically generated (docs/generated/eventlog.md). Significant +// changes to the event documentation should be reviewed by +// the doc team. 
// // Maintenance instructions: // @@ -47,20 +47,20 @@ // // Note that the JSON compatibility rules apply, not just protobuf: // -// - The names of the types and fields are part of the interface. A -// name change is a breaking change. The casing of the field names -// matters too. +// - The names of the types and fields are part of the interface. A +// name change is a breaking change. The casing of the field names +// matters too. // -// - the fields in the proto definition have jsontag starting with the -// empty string before the comma, e.g. ",omitempty" so as to -// override the default JSON field name generated by the protobuf -// compiler. We want to keep the cased names for compatibility with -// pre-v21.1 consumers of system.eventlog. +// - the fields in the proto definition have jsontag starting with the +// empty string before the comma, e.g. ",omitempty" so as to +// override the default JSON field name generated by the protobuf +// compiler. We want to keep the cased names for compatibility with +// pre-v21.1 consumers of system.eventlog. // -// - likewise, the entire CommonXXXX payloads are marked as embedded -// and their json tag is removed in every event log message, to -// ensure that it appears inline in the JSON output. This is because -// we wish our event structures to be flat and easier to consume. +// - likewise, the entire CommonXXXX payloads are marked as embedded +// and their json tag is removed in every event log message, to +// ensure that it appears inline in the JSON output. This is because +// we wish our event structures to be flat and easier to consume. // // Beware: because the common structs are marked inline in the // individual events, care must be taken to not reuse field diff --git a/pkg/util/log/file_api.go b/pkg/util/log/file_api.go index 23198fd3f471..cf2b1d477ba4 100644 --- a/pkg/util/log/file_api.go +++ b/pkg/util/log/file_api.go @@ -30,8 +30,8 @@ import ( // accidentally and it splits the details of the filename into groups for easy // parsing. The log file format is // -// {program}.{host}.{username}.{timestamp}.{pid}.log -// cockroach.Brams-MacBook-Pro.bram.2015-06-09T16-10-48Z.30209.log +// {program}.{host}.{username}.{timestamp}.{pid}.log +// cockroach.Brams-MacBook-Pro.bram.2015-06-09T16-10-48Z.30209.log // // All underscore in process, host and username are escaped to double // underscores and all periods are escaped to an underscore. @@ -115,10 +115,11 @@ func listLogGroups() (logGroups [][]logpb.FileInfo, err error) { // the log configuration. For example, consider the following config: // // file-groups: -// groupA: -// dir: dir1 -// groupB: -// dir: dir2 +// +// groupA: +// dir: dir1 +// groupB: +// dir: dir2 // // The result of ListLogFiles on this config will return the list // {cockroach-groupA.XXX.log, cockroach-groupB.XXX.log}, without diff --git a/pkg/util/log/format_crdb_v1.go b/pkg/util/log/format_crdb_v1.go index bdcc0fc68fa1..191ab3f978aa 100644 --- a/pkg/util/log/format_crdb_v1.go +++ b/pkg/util/log/format_crdb_v1.go @@ -223,7 +223,6 @@ func (formatCrdbV1TTYWithCounter) contentType() string { return "text/plain" } // Log lines are colorized depending on severity. // It uses a newly allocated *buffer. The caller is responsible // for calling putBuffer() afterwards. 
-// func formatLogEntryInternalV1( entry logpb.Entry, isHeader, showCounter bool, cp ttycolor.Profile, ) *buffer { diff --git a/pkg/util/log/formattable_tags.go b/pkg/util/log/formattable_tags.go index 850d89b8b607..b2d747e7ae4a 100644 --- a/pkg/util/log/formattable_tags.go +++ b/pkg/util/log/formattable_tags.go @@ -26,7 +26,9 @@ import ( // // Internally, it is a sequence of nul-delimited strings, // interleaving tag key and value strings. For example: -// {'n', 0, '1', 0, 's', 0, '2', 0} +// +// {'n', 0, '1', 0, 's', 0, '2', 0} +// // to encode e.g. n=1,s=2 // // Note that we preserve the invariant that there is always a value diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index 54bffc856c65..3ab6535f1d2a 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -51,11 +51,10 @@ func V(level Level) bool { // // Usage: // -// if ExpensiveLogEnabled(ctx, 2) { -// msg := constructExpensiveMessage() -// log.VEventf(ctx, 2, msg) -// } -// +// if ExpensiveLogEnabled(ctx, 2) { +// msg := constructExpensiveMessage() +// log.VEventf(ctx, 2, msg) +// } func ExpensiveLogEnabled(ctx context.Context, level Level) bool { if sp := tracing.SpanFromContext(ctx); sp != nil { if sp.IsVerbose() || sp.Tracer().HasExternalSink() { diff --git a/pkg/util/log/log_channels_generated.go b/pkg/util/log/log_channels_generated.go index b3db240d06a8..7abe46896926 100644 --- a/pkg/util/log/log_channels_generated.go +++ b/pkg/util/log/log_channels_generated.go @@ -1055,14 +1055,14 @@ type loggerOps struct{} // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes var Ops loggerOps // Ops and loggerOps implement ChannelLogger. @@ -1079,14 +1079,14 @@ var _ ChannelLogger = Ops // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. 
-// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `INFO` severity is used for informational messages that do not // require action. @@ -1103,14 +1103,14 @@ func (loggerOps) Infof(ctx context.Context, format string, args ...interface{}) // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `INFO` severity is used for informational messages that do not // require action. @@ -1127,14 +1127,14 @@ func (loggerOps) VInfof(ctx context.Context, level Level, format string, args .. // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `INFO` severity is used for informational messages that do not // require action. 
@@ -1150,14 +1150,14 @@ func (loggerOps) Info(ctx context.Context, msg string) { // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `INFO` severity is used for informational messages that do not // require action. @@ -1172,14 +1172,14 @@ func (loggerOps) InfofDepth(ctx context.Context, depth int, format string, args // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -1196,14 +1196,14 @@ func (loggerOps) Warningf(ctx context.Context, format string, args ...interface{ // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. 
+// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -1220,14 +1220,14 @@ func (loggerOps) VWarningf(ctx context.Context, level Level, format string, args // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -1243,14 +1243,14 @@ func (loggerOps) Warning(ctx context.Context, msg string) { // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -1265,14 +1265,14 @@ func (loggerOps) WarningfDepth(ctx context.Context, depth int, format string, ar // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. 
-// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -1290,14 +1290,14 @@ func (loggerOps) Errorf(ctx context.Context, format string, args ...interface{}) // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -1315,14 +1315,14 @@ func (loggerOps) VErrorf(ctx context.Context, level Level, format string, args . // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. 
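The hunks above only re-indent these doc comments for the Go 1.19 gofmt list style; the text itself is the usage contract for the OPS channel. As a rough sketch of what a caller looks like (illustrative only, not part of this diff; the package, function, and parameter names below are invented), a point operational event goes through the exported log.Ops logger at the severity the comments prescribe:

package example // hypothetical caller, not part of this change

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// reportDecommission follows the severity guidance restated above: INFO for
// informational events, ERROR when normal operation could not proceed.
func reportDecommission(ctx context.Context, nodeID int, err error) {
	if err == nil {
		// A topology change completed: a "point" operational event.
		log.Ops.Infof(ctx, "node n%d decommissioned", nodeID)
		return
	}
	log.Ops.Errorf(ctx, "decommission of n%d failed: %v", nodeID, err)
}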
@@ -1339,14 +1339,14 @@ func (loggerOps) Error(ctx context.Context, msg string) { // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -1362,14 +1362,14 @@ func (loggerOps) ErrorfDepth(ctx context.Context, depth int, format string, args // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -1387,14 +1387,14 @@ func (loggerOps) Fatalf(ctx context.Context, format string, args ...interface{}) // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. 
-// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -1412,14 +1412,14 @@ func (loggerOps) VFatalf(ctx context.Context, level Level, format string, args . // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -1436,14 +1436,14 @@ func (loggerOps) Fatal(ctx context.Context, msg string) { // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. 
A report is also sent to telemetry if telemetry @@ -1458,14 +1458,14 @@ func (loggerOps) FatalfDepth(ctx context.Context, depth int, format string, args // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes func (loggerOps) Shout(ctx context.Context, sev Severity, msg string) { shoutfDepth(ctx, 1, sev, channel.OPS, msg) } @@ -1477,14 +1477,14 @@ func (loggerOps) Shout(ctx context.Context, sev Severity, msg string) { // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes func (loggerOps) Shoutf(ctx context.Context, sev Severity, format string, args ...interface{}) { shoutfDepth(ctx, 1, sev, channel.OPS, format, args...) } @@ -1495,14 +1495,14 @@ func (loggerOps) Shoutf(ctx context.Context, sev Severity, format string, args . // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. 
-// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes func (loggerOps) VEvent(ctx context.Context, level Level, msg string) { vEventf(ctx, false /* isErr */, 1, level, channel.OPS, msg) } @@ -1513,14 +1513,14 @@ func (loggerOps) VEvent(ctx context.Context, level Level, msg string) { // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes func (loggerOps) VEventf(ctx context.Context, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1, level, channel.OPS, format, args...) } @@ -1530,14 +1530,14 @@ func (loggerOps) VEventf(ctx context.Context, level Level, format string, args . // The `OPS` channel is used to report "point" operational events, // initiated by user operators or automation: // -// - Operator or system actions on server processes: process starts, -// stops, shutdowns, crashes (if they can be logged), -// including each time: command-line parameters, current version being run -// - Actions that impact the topology of a cluster: node additions, -// removals, decommissions, etc. -// - Job-related initiation or termination -// - [Cluster setting](cluster-settings.html) changes -// - [Zone configuration](configure-replication-zones.html) changes +// - Operator or system actions on server processes: process starts, +// stops, shutdowns, crashes (if they can be logged), +// including each time: command-line parameters, current version being run +// - Actions that impact the topology of a cluster: node additions, +// removals, decommissions, etc. +// - Job-related initiation or termination +// - [Cluster setting](cluster-settings.html) changes +// - [Zone configuration](configure-replication-zones.html) changes func (loggerOps) VEventfDepth(ctx context.Context, depth int, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1+depth, level, channel.OPS, format, args...) 
} @@ -1550,11 +1550,11 @@ type loggerHealth struct{} // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability var Health loggerHealth // Health and loggerHealth implement ChannelLogger. @@ -1571,11 +1571,11 @@ var _ ChannelLogger = Health // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `INFO` severity is used for informational messages that do not // require action. @@ -1592,11 +1592,11 @@ func (loggerHealth) Infof(ctx context.Context, format string, args ...interface{ // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `INFO` severity is used for informational messages that do not // require action. @@ -1613,11 +1613,11 @@ func (loggerHealth) VInfof(ctx context.Context, level Level, format string, args // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `INFO` severity is used for informational messages that do not // require action. 
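Every channel logger in this generated file repeats the same method surface: Info/Warning/Error/Fatal plus *f and *fDepth variants, Shout/Shoutf with an explicit Severity argument, and verbosity-gated VEvent/VEventf/VEventfDepth. The depth argument shifts which caller is reported, so a wrapper can attribute the log entry to its own caller. A minimal sketch of that pattern, with an invented helper name (not part of this diff):

package example // hypothetical helper, not part of this change

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// warnHealth forwards to the HEALTH channel; depth=1 makes the entry report
// the file/line of warnHealth's caller rather than of this wrapper.
func warnHealth(ctx context.Context, format string, args ...interface{}) {
	log.Health.WarningfDepth(ctx, 1, format, args...)
}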
@@ -1633,11 +1633,11 @@ func (loggerHealth) Info(ctx context.Context, msg string) { // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `INFO` severity is used for informational messages that do not // require action. @@ -1652,11 +1652,11 @@ func (loggerHealth) InfofDepth(ctx context.Context, depth int, format string, ar // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -1673,11 +1673,11 @@ func (loggerHealth) Warningf(ctx context.Context, format string, args ...interfa // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -1694,11 +1694,11 @@ func (loggerHealth) VWarningf(ctx context.Context, level Level, format string, a // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. 
@@ -1714,11 +1714,11 @@ func (loggerHealth) Warning(ctx context.Context, msg string) { // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -1733,11 +1733,11 @@ func (loggerHealth) WarningfDepth(ctx context.Context, depth int, format string, // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -1755,11 +1755,11 @@ func (loggerHealth) Errorf(ctx context.Context, format string, args ...interface // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -1777,11 +1777,11 @@ func (loggerHealth) VErrorf(ctx context.Context, level Level, format string, arg // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. 
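For the HEALTH channel the comments call out background events such as resource usage. A hedged sketch of how a periodic monitor might use it, mixing a verbosity-gated VEventf with a WARNING escalation (the function name, verbosity level, and threshold are arbitrary choices for the example, not taken from the diff):

package example // hypothetical monitor, not part of this change

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// reportDiskUsage emits routine numbers only when verbose logging is enabled
// for this file, and escalates once usage looks critical.
func reportDiskUsage(ctx context.Context, usedPct float64) {
	// Verbosity-gated: dropped unless vmodule/verbosity enables level 2 here.
	log.Health.VEventf(ctx, 2, "disk usage at %.1f%%", usedPct)
	if usedPct > 90 {
		// "Situations which may require special handling, where normal
		// operation is expected to resume automatically."
		log.Health.Warningf(ctx, "critical disk usage: %.1f%%", usedPct)
	}
}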
@@ -1798,11 +1798,11 @@ func (loggerHealth) Error(ctx context.Context, msg string) { // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -1818,11 +1818,11 @@ func (loggerHealth) ErrorfDepth(ctx context.Context, depth int, format string, a // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -1840,11 +1840,11 @@ func (loggerHealth) Fatalf(ctx context.Context, format string, args ...interface // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -1862,11 +1862,11 @@ func (loggerHealth) VFatalf(ctx context.Context, level Level, format string, arg // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. 
A report is also sent to telemetry if telemetry @@ -1883,11 +1883,11 @@ func (loggerHealth) Fatal(ctx context.Context, msg string) { // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -1902,11 +1902,11 @@ func (loggerHealth) FatalfDepth(ctx context.Context, depth int, format string, a // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability func (loggerHealth) Shout(ctx context.Context, sev Severity, msg string) { shoutfDepth(ctx, 1, sev, channel.HEALTH, msg) } @@ -1918,11 +1918,11 @@ func (loggerHealth) Shout(ctx context.Context, sev Severity, msg string) { // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability func (loggerHealth) Shoutf(ctx context.Context, sev Severity, format string, args ...interface{}) { shoutfDepth(ctx, 1, sev, channel.HEALTH, format, args...) 
} @@ -1933,11 +1933,11 @@ func (loggerHealth) Shoutf(ctx context.Context, sev Severity, format string, arg // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability func (loggerHealth) VEvent(ctx context.Context, level Level, msg string) { vEventf(ctx, false /* isErr */, 1, level, channel.HEALTH, msg) } @@ -1948,11 +1948,11 @@ func (loggerHealth) VEvent(ctx context.Context, level Level, msg string) { // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability func (loggerHealth) VEventf(ctx context.Context, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1, level, channel.HEALTH, format, args...) } @@ -1962,11 +1962,11 @@ func (loggerHealth) VEventf(ctx context.Context, level Level, format string, arg // The `HEALTH` channel is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // -// - Current resource usage, including critical resource usage -// - Node-node connection events, including connection errors and -// gossip details -// - Range and table leasing events -// - Up- and down-replication, range unavailability +// - Current resource usage, including critical resource usage +// - Node-node connection events, including connection errors and +// gossip details +// - Range and table leasing events +// - Up- and down-replication, range unavailability func (loggerHealth) VEventfDepth(ctx context.Context, depth int, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1+depth, level, channel.HEALTH, format, args...) } @@ -2277,9 +2277,9 @@ type loggerSessions struct{} // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
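The SESSIONS block introduced here is the audit-oriented channel; the events that feed it are gated by the server.auth_log.sql_connections.enabled / server.auth_log.sql_sessions.enabled cluster settings quoted in the comments. The call shape matches the other channels; a sketch with an invented function (in the real code paths these events are mostly emitted as structured events rather than free-form text):

package example // hypothetical sketch, not part of this change

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// logAuthFailure records a failed login attempt on the SESSIONS channel.
func logAuthFailure(ctx context.Context, user string) {
	log.Sessions.Warningf(ctx, "authentication failed for user %q", user)
}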
@@ -2300,9 +2300,9 @@ var _ ChannelLogger = Sessions // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2323,9 +2323,9 @@ func (loggerSessions) Infof(ctx context.Context, format string, args ...interfac // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2346,9 +2346,9 @@ func (loggerSessions) VInfof(ctx context.Context, level Level, format string, ar // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2368,9 +2368,9 @@ func (loggerSessions) Info(ctx context.Context, msg string) { // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2389,9 +2389,9 @@ func (loggerSessions) InfofDepth(ctx context.Context, depth int, format string, // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2412,9 +2412,9 @@ func (loggerSessions) Warningf(ctx context.Context, format string, args ...inter // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -2435,9 +2435,9 @@ func (loggerSessions) VWarningf(ctx context.Context, level Level, format string, // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2457,9 +2457,9 @@ func (loggerSessions) Warning(ctx context.Context, msg string) { // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2478,9 +2478,9 @@ func (loggerSessions) WarningfDepth(ctx context.Context, depth int, format strin // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2502,9 +2502,9 @@ func (loggerSessions) Errorf(ctx context.Context, format string, args ...interfa // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2526,9 +2526,9 @@ func (loggerSessions) VErrorf(ctx context.Context, level Level, format string, a // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2549,9 +2549,9 @@ func (loggerSessions) Error(ctx context.Context, msg string) { // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -2571,9 +2571,9 @@ func (loggerSessions) ErrorfDepth(ctx context.Context, depth int, format string, // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2595,9 +2595,9 @@ func (loggerSessions) Fatalf(ctx context.Context, format string, args ...interfa // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2619,9 +2619,9 @@ func (loggerSessions) VFatalf(ctx context.Context, level Level, format string, a // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2642,9 +2642,9 @@ func (loggerSessions) Fatal(ctx context.Context, msg string) { // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2663,9 +2663,9 @@ func (loggerSessions) FatalfDepth(ctx context.Context, depth int, format string, // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2681,9 +2681,9 @@ func (loggerSessions) Shout(ctx context.Context, sev Severity, msg string) { // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -2698,9 +2698,9 @@ func (loggerSessions) Shoutf(ctx context.Context, sev Severity, format string, a // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2715,9 +2715,9 @@ func (loggerSessions) VEvent(ctx context.Context, level Level, msg string) { // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2731,9 +2731,9 @@ func (loggerSessions) VEventf(ctx context.Context, level Level, format string, a // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // -// - Connections opened/closed -// - Authentication events: logins, failed attempts -// - Session and query cancellation +// - Connections opened/closed +// - Authentication events: logins, failed attempts +// - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -2753,9 +2753,9 @@ type loggerSqlSchema struct{} // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -2779,9 +2779,9 @@ var _ ChannelLogger = SqlSchema // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -2805,9 +2805,9 @@ func (loggerSqlSchema) Infof(ctx context.Context, format string, args ...interfa // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. 
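SQL_SCHEMA is the channel for schema-change events (object creation, column changes, sequence parameters). A sketch of a direct call for illustration (invented names; the repo's schema-change code largely emits structured events onto this channel rather than plain messages):

package example // hypothetical sketch, not part of this change

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// noteColumnAdded records a schema change that alters the functional behavior
// of client apps using the table, which is what SQL_SCHEMA is meant to carry.
func noteColumnAdded(ctx context.Context, table, column string) {
	log.SqlSchema.Infof(ctx, "column %q added to table %q", column, table)
}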
@@ -2831,9 +2831,9 @@ func (loggerSqlSchema) VInfof(ctx context.Context, level Level, format string, a // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -2856,9 +2856,9 @@ func (loggerSqlSchema) Info(ctx context.Context, msg string) { // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -2880,9 +2880,9 @@ func (loggerSqlSchema) InfofDepth(ctx context.Context, depth int, format string, // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -2906,9 +2906,9 @@ func (loggerSqlSchema) Warningf(ctx context.Context, format string, args ...inte // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -2932,9 +2932,9 @@ func (loggerSqlSchema) VWarningf(ctx context.Context, level Level, format string // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -2957,9 +2957,9 @@ func (loggerSqlSchema) Warning(ctx context.Context, msg string) { // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. 
@@ -2981,9 +2981,9 @@ func (loggerSqlSchema) WarningfDepth(ctx context.Context, depth int, format stri // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3008,9 +3008,9 @@ func (loggerSqlSchema) Errorf(ctx context.Context, format string, args ...interf // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3035,9 +3035,9 @@ func (loggerSqlSchema) VErrorf(ctx context.Context, level Level, format string, // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3061,9 +3061,9 @@ func (loggerSqlSchema) Error(ctx context.Context, msg string) { // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3086,9 +3086,9 @@ func (loggerSqlSchema) ErrorfDepth(ctx context.Context, depth int, format string // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3113,9 +3113,9 @@ func (loggerSqlSchema) Fatalf(ctx context.Context, format string, args ...interf // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. 
@@ -3140,9 +3140,9 @@ func (loggerSqlSchema) VFatalf(ctx context.Context, level Level, format string, // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3166,9 +3166,9 @@ func (loggerSqlSchema) Fatal(ctx context.Context, msg string) { // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3190,9 +3190,9 @@ func (loggerSqlSchema) FatalfDepth(ctx context.Context, depth int, format string // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3211,9 +3211,9 @@ func (loggerSqlSchema) Shout(ctx context.Context, sev Severity, msg string) { // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3231,9 +3231,9 @@ func (loggerSqlSchema) Shoutf(ctx context.Context, sev Severity, format string, // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3251,9 +3251,9 @@ func (loggerSqlSchema) VEvent(ctx context.Context, level Level, msg string) { // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. 
@@ -3270,9 +3270,9 @@ func (loggerSqlSchema) VEventf(ctx context.Context, level Level, format string, // // This includes: // -// - Database/schema/table/sequence/view/type creation -// - Adding/removing/changing table columns -// - Changing sequence parameters +// - Database/schema/table/sequence/view/type creation +// - Adding/removing/changing table columns +// - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -3288,10 +3288,10 @@ type loggerUserAdmin struct{} // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3311,10 +3311,10 @@ var _ ChannelLogger = UserAdmin // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3334,10 +3334,10 @@ func (loggerUserAdmin) Infof(ctx context.Context, format string, args ...interfa // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3357,10 +3357,10 @@ func (loggerUserAdmin) VInfof(ctx context.Context, level Level, format string, a // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3379,10 +3379,10 @@ func (loggerUserAdmin) Info(ctx context.Context, msg string) { // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) 
+// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3400,10 +3400,10 @@ func (loggerUserAdmin) InfofDepth(ctx context.Context, depth int, format string, // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3423,10 +3423,10 @@ func (loggerUserAdmin) Warningf(ctx context.Context, format string, args ...inte // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3446,10 +3446,10 @@ func (loggerUserAdmin) VWarningf(ctx context.Context, level Level, format string // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3468,10 +3468,10 @@ func (loggerUserAdmin) Warning(ctx context.Context, msg string) { // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3489,10 +3489,10 @@ func (loggerUserAdmin) WarningfDepth(ctx context.Context, depth int, format stri // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -3513,10 +3513,10 @@ func (loggerUserAdmin) Errorf(ctx context.Context, format string, args ...interf // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3537,10 +3537,10 @@ func (loggerUserAdmin) VErrorf(ctx context.Context, level Level, format string, // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3560,10 +3560,10 @@ func (loggerUserAdmin) Error(ctx context.Context, msg string) { // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3582,10 +3582,10 @@ func (loggerUserAdmin) ErrorfDepth(ctx context.Context, depth int, format string // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3606,10 +3606,10 @@ func (loggerUserAdmin) Fatalf(ctx context.Context, format string, args ...interf // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3630,10 +3630,10 @@ func (loggerUserAdmin) VFatalf(ctx context.Context, level Level, format string, // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) 
-// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3653,10 +3653,10 @@ func (loggerUserAdmin) Fatal(ctx context.Context, msg string) { // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3674,10 +3674,10 @@ func (loggerUserAdmin) FatalfDepth(ctx context.Context, depth int, format string // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3692,10 +3692,10 @@ func (loggerUserAdmin) Shout(ctx context.Context, sev Severity, msg string) { // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3709,10 +3709,10 @@ func (loggerUserAdmin) Shoutf(ctx context.Context, sev Severity, format string, // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3726,10 +3726,10 @@ func (loggerUserAdmin) VEvent(ctx context.Context, level Level, msg string) { // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
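To make the audit-oriented intent of the USER_ADMIN channel concrete, a hedged sketch of recording a role grant follows; UserAdmin is the exported value asserted to implement ChannelLogger above, and the helper name is made up.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// reportRoleGrant is a hypothetical helper that records a role grant on the
// USER_ADMIN channel. Event numbering and synchronous writes come from the
// sink configuration ("audit" mode), not from this call site.
func reportRoleGrant(ctx context.Context, role, grantee string) {
	log.UserAdmin.Infof(ctx, "role %q granted to %q", role, grantee)
}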
@@ -3742,10 +3742,10 @@ func (loggerUserAdmin) VEventf(ctx context.Context, level Level, format string, // The `USER_ADMIN` channel is used to report changes // in users and roles, including: // -// - Users added/dropped -// - Changes to authentication credentials (e.g., passwords, validity, etc.) -// - Role grants/revocations -// - Role option grants/revocations +// - Users added/dropped +// - Changes to authentication credentials (e.g., passwords, validity, etc.) +// - Role grants/revocations +// - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3761,8 +3761,8 @@ type loggerPrivileges struct{} // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3782,8 +3782,8 @@ var _ ChannelLogger = Privileges // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3803,8 +3803,8 @@ func (loggerPrivileges) Infof(ctx context.Context, format string, args ...interf // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3824,8 +3824,8 @@ func (loggerPrivileges) VInfof(ctx context.Context, level Level, format string, // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3844,8 +3844,8 @@ func (loggerPrivileges) Info(ctx context.Context, msg string) { // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3863,8 +3863,8 @@ func (loggerPrivileges) InfofDepth(ctx context.Context, depth int, format string // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -3884,8 +3884,8 @@ func (loggerPrivileges) Warningf(ctx context.Context, format string, args ...int // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3905,8 +3905,8 @@ func (loggerPrivileges) VWarningf(ctx context.Context, level Level, format strin // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3925,8 +3925,8 @@ func (loggerPrivileges) Warning(ctx context.Context, msg string) { // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3944,8 +3944,8 @@ func (loggerPrivileges) WarningfDepth(ctx context.Context, depth int, format str // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3966,8 +3966,8 @@ func (loggerPrivileges) Errorf(ctx context.Context, format string, args ...inter // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -3988,8 +3988,8 @@ func (loggerPrivileges) VErrorf(ctx context.Context, level Level, format string, // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4009,8 +4009,8 @@ func (loggerPrivileges) Error(ctx context.Context, msg string) { // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -4029,8 +4029,8 @@ func (loggerPrivileges) ErrorfDepth(ctx context.Context, depth int, format strin // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4051,8 +4051,8 @@ func (loggerPrivileges) Fatalf(ctx context.Context, format string, args ...inter // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4073,8 +4073,8 @@ func (loggerPrivileges) VFatalf(ctx context.Context, level Level, format string, // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4094,8 +4094,8 @@ func (loggerPrivileges) Fatal(ctx context.Context, msg string) { // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4113,8 +4113,8 @@ func (loggerPrivileges) FatalfDepth(ctx context.Context, depth int, format strin // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4129,8 +4129,8 @@ func (loggerPrivileges) Shout(ctx context.Context, sev Severity, msg string) { // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4144,8 +4144,8 @@ func (loggerPrivileges) Shoutf(ctx context.Context, sev Severity, format string, // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
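Because each channel value is asserted to implement ChannelLogger (see `var _ ChannelLogger = Privileges` above), helpers can be written against the interface and pointed at the PRIVILEGES channel. The sketch assumes ChannelLogger exposes the Infof method used throughout this file; auditGrant is hypothetical.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// auditGrant writes a privilege-grant record to whichever channel logger it
// is given; passing log.Privileges targets the PRIVILEGES channel.
func auditGrant(ctx context.Context, dst log.ChannelLogger, object, grantee string) {
	dst.Infof(ctx, "privileges on %s granted to %q", object, grantee)
}

A typical call site would be auditGrant(ctx, log.Privileges, "database defaultdb", "app_user"), where the object and grantee strings are illustrative.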
@@ -4159,8 +4159,8 @@ func (loggerPrivileges) VEvent(ctx context.Context, level Level, msg string) { // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4173,8 +4173,8 @@ func (loggerPrivileges) VEventf(ctx context.Context, level Level, format string, // The `PRIVILEGES` channel is used to report data // authorization changes, including: // -// - Privilege grants/revocations on database, objects, etc. -// - Object ownership changes +// - Privilege grants/revocations on database, objects, etc. +// - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4190,10 +4190,10 @@ type loggerSensitiveAccess struct{} // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4213,10 +4213,10 @@ var _ ChannelLogger = SensitiveAccess // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4236,10 +4236,10 @@ func (loggerSensitiveAccess) Infof(ctx context.Context, format string, args ...i // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -4259,10 +4259,10 @@ func (loggerSensitiveAccess) VInfof(ctx context.Context, level Level, format str // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4281,10 +4281,10 @@ func (loggerSensitiveAccess) Info(ctx context.Context, msg string) { // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4302,10 +4302,10 @@ func (loggerSensitiveAccess) InfofDepth(ctx context.Context, depth int, format s // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4325,10 +4325,10 @@ func (loggerSensitiveAccess) Warningf(ctx context.Context, format string, args . // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -4348,10 +4348,10 @@ func (loggerSensitiveAccess) VWarningf(ctx context.Context, level Level, format // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4370,10 +4370,10 @@ func (loggerSensitiveAccess) Warning(ctx context.Context, msg string) { // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4391,10 +4391,10 @@ func (loggerSensitiveAccess) WarningfDepth(ctx context.Context, depth int, forma // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4415,10 +4415,10 @@ func (loggerSensitiveAccess) Errorf(ctx context.Context, format string, args ... // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -4439,10 +4439,10 @@ func (loggerSensitiveAccess) VErrorf(ctx context.Context, level Level, format st // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4462,10 +4462,10 @@ func (loggerSensitiveAccess) Error(ctx context.Context, msg string) { // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4484,10 +4484,10 @@ func (loggerSensitiveAccess) ErrorfDepth(ctx context.Context, depth int, format // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4508,10 +4508,10 @@ func (loggerSensitiveAccess) Fatalf(ctx context.Context, format string, args ... // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
@@ -4532,10 +4532,10 @@ func (loggerSensitiveAccess) VFatalf(ctx context.Context, level Level, format st // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4555,10 +4555,10 @@ func (loggerSensitiveAccess) Fatal(ctx context.Context, msg string) { // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4576,10 +4576,10 @@ func (loggerSensitiveAccess) FatalfDepth(ctx context.Context, depth int, format // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4594,10 +4594,10 @@ func (loggerSensitiveAccess) Shout(ctx context.Context, sev Severity, msg string // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
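For the verbosity-gated variants (VEvent/VEventf above), here is a sketch of reporting an admin-role statement on the SENSITIVE_ACCESS channel; the helper, the message, and the level value 2 are illustrative only.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// noteAdminStatement is a hypothetical helper that records a statement
// executed by a user with the admin role. VEventf is the verbosity-gated
// formatting variant; level 2 is an arbitrary choice here.
func noteAdminStatement(ctx context.Context, user, stmt string) {
	log.SensitiveAccess.VEventf(ctx, 2, "admin user %q executed: %s", user, stmt)
}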
@@ -4611,10 +4611,10 @@ func (loggerSensitiveAccess) Shoutf(ctx context.Context, sev Severity, format st // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4628,10 +4628,10 @@ func (loggerSensitiveAccess) VEvent(ctx context.Context, level Level, msg string // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4644,10 +4644,10 @@ func (loggerSensitiveAccess) VEventf(ctx context.Context, level Level, format st // The `SENSITIVE_ACCESS` channel is used to report SQL // data access to sensitive data: // -// - Data access audit events (when table audit is enabled via -// [EXPERIMENTAL_AUDIT](experimental-audit.html)) -// - SQL statements executed by users with the admin role -// - Operations that write to system tables +// - Data access audit events (when table audit is enabled via +// [EXPERIMENTAL_AUDIT](experimental-audit.html)) +// - SQL statements executed by users with the admin role +// - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -4663,9 +4663,9 @@ type loggerSqlExec struct{} // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. var SqlExec loggerSqlExec // SqlExec and loggerSqlExec implement ChannelLogger. @@ -4682,9 +4682,9 @@ var _ ChannelLogger = SqlExec // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not // require action. 
@@ -4701,9 +4701,9 @@ func (loggerSqlExec) Infof(ctx context.Context, format string, args ...interface // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not // require action. @@ -4720,9 +4720,9 @@ func (loggerSqlExec) VInfof(ctx context.Context, level Level, format string, arg // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not // require action. @@ -4738,9 +4738,9 @@ func (loggerSqlExec) Info(ctx context.Context, msg string) { // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not // require action. @@ -4755,9 +4755,9 @@ func (loggerSqlExec) InfofDepth(ctx context.Context, depth int, format string, a // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -4774,9 +4774,9 @@ func (loggerSqlExec) Warningf(ctx context.Context, format string, args ...interf // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. 
// // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -4793,9 +4793,9 @@ func (loggerSqlExec) VWarningf(ctx context.Context, level Level, format string, // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -4811,9 +4811,9 @@ func (loggerSqlExec) Warning(ctx context.Context, msg string) { // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `WARNING` severity is used for situations which may require special handling, // where normal operation is expected to resume automatically. @@ -4828,9 +4828,9 @@ func (loggerSqlExec) WarningfDepth(ctx context.Context, depth int, format string // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -4848,9 +4848,9 @@ func (loggerSqlExec) Errorf(ctx context.Context, format string, args ...interfac // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. 
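Since the surrounding comments spell out the INFO/WARNING/ERROR semantics for SQL_EXEC, here is a sketch that picks the severity based on outcome; the helper and its policy are hypothetical.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// reportStatementOutcome is a hypothetical helper that logs a statement
// execution on the SQL_EXEC channel: INFO for normal completion, ERROR when
// execution could not proceed as expected.
func reportStatementOutcome(ctx context.Context, stmt string, err error) {
	if err != nil {
		log.SqlExec.Errorf(ctx, "statement %q failed: %v", stmt, err)
		return
	}
	log.SqlExec.Infof(ctx, "statement %q executed", stmt)
}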
@@ -4868,9 +4868,9 @@ func (loggerSqlExec) VErrorf(ctx context.Context, level Level, format string, ar // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -4887,9 +4887,9 @@ func (loggerSqlExec) Error(ctx context.Context, msg string) { // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `ERROR` severity is used for situations that require special handling, // where normal operation could not proceed as expected. @@ -4905,9 +4905,9 @@ func (loggerSqlExec) ErrorfDepth(ctx context.Context, depth int, format string, // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -4925,9 +4925,9 @@ func (loggerSqlExec) Fatalf(ctx context.Context, format string, args ...interfac // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -4945,9 +4945,9 @@ func (loggerSqlExec) VFatalf(ctx context.Context, level Level, format string, ar // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. 
+// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -4964,9 +4964,9 @@ func (loggerSqlExec) Fatal(ctx context.Context, msg string) { // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immedate, hard // server shutdown. A report is also sent to telemetry if telemetry @@ -4981,9 +4981,9 @@ func (loggerSqlExec) FatalfDepth(ctx context.Context, depth int, format string, // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) Shout(ctx context.Context, sev Severity, msg string) { shoutfDepth(ctx, 1, sev, channel.SQL_EXEC, msg) } @@ -4995,9 +4995,9 @@ func (loggerSqlExec) Shout(ctx context.Context, sev Severity, msg string) { // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) Shoutf(ctx context.Context, sev Severity, format string, args ...interface{}) { shoutfDepth(ctx, 1, sev, channel.SQL_EXEC, format, args...) } @@ -5008,9 +5008,9 @@ func (loggerSqlExec) Shoutf(ctx context.Context, sev Severity, format string, ar // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. 
func (loggerSqlExec) VEvent(ctx context.Context, level Level, msg string) { vEventf(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, msg) } @@ -5021,9 +5021,9 @@ func (loggerSqlExec) VEvent(ctx context.Context, level Level, msg string) { // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) VEventf(ctx context.Context, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, format, args...) } @@ -5033,9 +5033,9 @@ func (loggerSqlExec) VEventf(ctx context.Context, level Level, format string, ar // The `SQL_EXEC` channel is used to report SQL execution on // behalf of client connections: // -// - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) -// - uncaught Go panic errors during the execution of a SQL statement. +// - Logical SQL statement executions (when enabled via the +// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) VEventfDepth(ctx context.Context, depth int, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1+depth, level, channel.SQL_EXEC, format, args...) } diff --git a/pkg/util/log/logconfig/config.go b/pkg/util/log/logconfig/config.go index ca7f30f4b409..c400fb191199 100644 --- a/pkg/util/log/logconfig/config.go +++ b/pkg/util/log/logconfig/config.go @@ -158,21 +158,21 @@ type CaptureFd2Config struct { // Buffering may be configured with the following fields. It may also be explicitly // set to "NONE" to disable buffering. Example configuration: // -// file-defaults: -// dir: logs -// buffering: -// max-staleness: 20s -// flush-trigger-size: 25KB -// max-buffer-size: 10MB -// sinks: -// file-groups: -// health: -// channels: HEALTH -// buffering: -// max-staleness: 5s # Override max-staleness for this sink. -// ops: -// channels: OPS -// buffering: NONE # Disable buffering for this sink. +// file-defaults: +// dir: logs +// buffering: +// max-staleness: 20s +// flush-trigger-size: 25KB +// max-buffer-size: 10MB +// sinks: +// file-groups: +// health: +// channels: HEALTH +// buffering: +// max-staleness: 5s # Override max-staleness for this sink. +// ops: +// channels: OPS +// buffering: NONE # Disable buffering for this sink. type CommonBufferSinkConfig struct { // MaxStaleness is the maximum time a log message will sit in the buffer // before a flush is triggered. @@ -256,9 +256,9 @@ type SinkConfig struct { // The configuration key under the `sinks` key in the YAML configuration // is `stderr`. 
Example configuration: // -// sinks: -// stderr: # standard error sink configuration starts here -// channels: DEV +// sinks: +// stderr: # standard error sink configuration starts here +// channels: DEV // // {{site.data.alerts.callout_info}} // The server start-up messages are still emitted at the start of the standard error @@ -279,7 +279,6 @@ type SinkConfig struct { // For a similar reason, no guarantee of parsability of the output format is available // when `capture-stray-errors` is disabled, since the standard error stream can then // contain an arbitrary interleaving of non-formatted error data. -// type StderrSinkConfig struct { // Channels is the list of logging channels that use this sink. Channels ChannelFilters `yaml:",omitempty,flow"` @@ -330,25 +329,25 @@ type FluentDefaults struct { // The configuration key under the `sinks` key in the YAML // configuration is `fluent-servers`. Example configuration: // -// sinks: -// fluent-servers: # fluent configurations start here -// health: # defines one sink called "health" -// channels: HEALTH -// address: 127.0.0.1:5170 +// sinks: +// fluent-servers: # fluent configurations start here +// health: # defines one sink called "health" +// channels: HEALTH +// address: 127.0.0.1:5170 // // Every new server sink configured automatically inherits the configurations set in the `fluent-defaults` section. // // For example: // -// fluent-defaults: -// redactable: false # default: disable redaction markers -// sinks: -// fluent-servers: -// health: -// channels: HEALTH -// # This sink has redactable set to false, -// # as the setting is inherited from fluent-defaults -// # unless overridden here. +// fluent-defaults: +// redactable: false # default: disable redaction markers +// sinks: +// fluent-servers: +// health: +// channels: HEALTH +// # This sink has redactable set to false, +// # as the setting is inherited from fluent-defaults +// # unless overridden here. // // The default output format for Fluent sinks is // `json-fluent-compact`. The `fluent` variants of the JSON formats @@ -358,7 +357,6 @@ type FluentDefaults struct { // {{site.data.alerts.callout_info}} // Run `cockroach debug check-log-config` to verify the effect of defaults inheritance. // {{site.data.alerts.end}} -// type FluentSinkConfig struct { // Channels is the list of logging channels that use this sink. Channels ChannelFilters `yaml:",omitempty,flow"` @@ -423,10 +421,10 @@ type FileDefaults struct { // The configuration key under the `sinks` key in the YAML // configuration is `file-groups`. Example configuration: // -// sinks: -// file-groups: # file group configurations start here -// health: # defines one group called "health" -// channels: HEALTH +// sinks: +// file-groups: # file group configurations start here +// health: # defines one group called "health" +// channels: HEALTH // // Each generated log file is prefixed by the name of the process, // followed by the name of the group, separated by a hyphen. For example, @@ -444,24 +442,23 @@ type FileDefaults struct { // // For example: // -// file-defaults: -// redactable: false # default: disable redaction markers -// dir: logs -// sinks: -// file-groups: -// health: -// channels: HEALTH -// # This sink has redactable set to false, -// # as the setting is inherited from file-defaults -// # unless overridden here. 
-// # -// # Example override: -// dir: health-logs # override the default 'logs' +// file-defaults: +// redactable: false # default: disable redaction markers +// dir: logs +// sinks: +// file-groups: +// health: +// channels: HEALTH +// # This sink has redactable set to false, +// # as the setting is inherited from file-defaults +// # unless overridden here. +// # +// # Example override: +// dir: health-logs # override the default 'logs' // // {{site.data.alerts.callout_success}} // Run `cockroach debug check-log-config` to verify the effect of defaults inheritance. // {{site.data.alerts.end}} -// type FileSinkConfig struct { // Channels is the list of logging channels that use this sink. Channels ChannelFilters `yaml:",omitempty,flow"` @@ -513,25 +510,25 @@ type HTTPDefaults struct { // The configuration key under the `sinks` key in the YAML // configuration is `http-servers`. Example configuration: // -// sinks: -// http-servers: -// health: -// channels: HEALTH -// address: http://127.0.0.1 +// sinks: +// http-servers: +// health: +// channels: HEALTH +// address: http://127.0.0.1 // // Every new server sink configured automatically inherits the configuration set in the `http-defaults` section. // // For example: // -// http-defaults: -// redactable: false # default: disable redaction markers -// sinks: -// http-servers: -// health: -// channels: HEALTH -// # This sink has redactable set to false, -// # as the setting is inherited from fluent-defaults -// # unless overridden here. +// http-defaults: +// redactable: false # default: disable redaction markers +// sinks: +// http-servers: +// health: +// channels: HEALTH +// # This sink has redactable set to false, +// # as the setting is inherited from fluent-defaults +// # unless overridden here. // // The default output format for HTTP sinks is // `json-compact`. [Other supported formats.](log-formats.html) @@ -539,7 +536,6 @@ type HTTPDefaults struct { // {{site.data.alerts.callout_info}} // Run `cockroach debug check-log-config` to verify the effect of defaults inheritance. // {{site.data.alerts.end}} -// type HTTPSinkConfig struct { // Channels is the list of logging channels that use this sink. Channels ChannelFilters `yaml:",omitempty,flow"` @@ -675,12 +671,13 @@ func (c *ChannelList) Sort() { } // parseChannelList recognizes the following formats: -// all -// X,Y,Z -// [all] -// [X,Y,Z] -// all except X,Y,Z -// all except [X,Y,Z] +// +// all +// X,Y,Z +// [all] +// [X,Y,Z] +// all except X,Y,Z +// all except [X,Y,Z] func parseChannelList(s string) ([]logpb.Channel, error) { // We accept mixed case -- normalize everything. 
s = strings.ToUpper(strings.TrimSpace(s)) diff --git a/pkg/util/log/logconfig/doc.go b/pkg/util/log/logconfig/doc.go index ea91084619f0..d358ef5564fe 100644 --- a/pkg/util/log/logconfig/doc.go +++ b/pkg/util/log/logconfig/doc.go @@ -13,47 +13,46 @@ // // General format of the command-line flags: // -// --log= +// --log= // // The YAML configuration format works as follows: // -// file-defaults: #optional -// dir: # output directory, defaults to first store dir -// max-file-size: # max log file size, default 10MB -// max-group-size: # max log file group size, default 100MB -// sync-writes: # whether to sync each write, default false -// -// -// sinks: #optional -// stderr: #optional -// channels: # channel selection for stderr output, default ALL -// # if not specified, inherit from file-defaults -// -// file-groups: #optional -// : -// channels: # channel selection for this file output, mandatory -// max-file-size: # defaults to file-defaults.max-file-size -// max-group-size: # defaults to file-defaults.max-group-size -// sync-writes: # defaults to file-defaults.sync-writes -// # if not specified, inherit from file-defaults -// -// ... repeat ... -// -// capture-stray-errors: #optional -// enable: # whether to enable internal fd2 capture -// dir: # output directory, defaults to file-defaults.dir -// max-group-size: # defaults to file-defaults.max-group-size -// -// -// filter: # min severity level for file output, default INFO -// redact: # whether to remove sensitive info, default false -// redactable: # whether to strip redaction markers, default false -// format: # format to use for log enries, default -// # crdb-v1 for files, crdb-v1-tty for stderr -// exit-on-error: # whether to terminate upon a write error -// # default true for file+stderr sinks -// auditable: # if true, activates sink-specific features -// # that enhance non-repudiability. -// # also implies exit-on-error: true. -// +// file-defaults: #optional +// dir: # output directory, defaults to first store dir +// max-file-size: # max log file size, default 10MB +// max-group-size: # max log file group size, default 100MB +// sync-writes: # whether to sync each write, default false +// +// +// sinks: #optional +// stderr: #optional +// channels: # channel selection for stderr output, default ALL +// # if not specified, inherit from file-defaults +// +// file-groups: #optional +// : +// channels: # channel selection for this file output, mandatory +// max-file-size: # defaults to file-defaults.max-file-size +// max-group-size: # defaults to file-defaults.max-group-size +// sync-writes: # defaults to file-defaults.sync-writes +// # if not specified, inherit from file-defaults +// +// ... repeat ... +// +// capture-stray-errors: #optional +// enable: # whether to enable internal fd2 capture +// dir: # output directory, defaults to file-defaults.dir +// max-group-size: # defaults to file-defaults.max-group-size +// +// +// filter: # min severity level for file output, default INFO +// redact: # whether to remove sensitive info, default false +// redactable: # whether to strip redaction markers, default false +// format: # format to use for log enries, default +// # crdb-v1 for files, crdb-v1-tty for stderr +// exit-on-error: # whether to terminate upon a write error +// # default true for file+stderr sinks +// auditable: # if true, activates sink-specific features +// # that enhance non-repudiability. +// # also implies exit-on-error: true. 
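To make the schema listed above concrete, the sketch below pushes a minimal `--log` payload through a plain YAML parser. The keys are taken from the listing above; the concrete values, the gopkg.in/yaml.v2 dependency, and the standalone-program shape are illustrative assumptions, not part of the logconfig package API.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// Keys come from the schema above; the values are examples only.
	const payload = `
file-defaults:
  dir: logs
  max-file-size: 10MB
sinks:
  file-groups:
    health:
      channels: HEALTH
capture-stray-errors:
  enable: true
`
	var cfg map[string]interface{}
	if err := yaml.Unmarshal([]byte(payload), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg["sinks"])
}

As noted in the callouts above, `cockroach debug check-log-config` remains the authoritative way to see how such a payload resolves against the defaults.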
package logconfig diff --git a/pkg/util/log/logcrash/crash_reporting.go b/pkg/util/log/logcrash/crash_reporting.go index 46736bcd8c35..0bb19bd3f5df 100644 --- a/pkg/util/log/logcrash/crash_reporting.go +++ b/pkg/util/log/logcrash/crash_reporting.go @@ -198,7 +198,7 @@ func PanicAsError(depth int, r interface{}) error { // Non-release builds wishing to use Sentry reports // are invited to use the following URL instead: // -// https://ignored@errors.cockroachdb.com/api/sentrydev/v2/1111 +// https://ignored@errors.cockroachdb.com/api/sentrydev/v2/1111 // // This can be set via e.g. the env var COCKROACH_CRASH_REPORTS. // Note that the special number "1111" is important as it diff --git a/pkg/util/log/logcrash/crash_reporting_test.go b/pkg/util/log/logcrash/crash_reporting_test.go index f35f2e1db97c..1c2576b94d57 100644 --- a/pkg/util/log/logcrash/crash_reporting_test.go +++ b/pkg/util/log/logcrash/crash_reporting_test.go @@ -379,7 +379,8 @@ func TestUptimeTag(t *testing.T) { } // makeTypeAssertionErr returns a runtime.Error with the message: -// interface conversion: interface {} is nil, not int +// +// interface conversion: interface {} is nil, not int func makeTypeAssertionErr() (result runtime.Error) { defer func() { e := recover() diff --git a/pkg/util/log/logpb/log.proto b/pkg/util/log/logpb/log.proto index 413aa630ffe7..806b63ee0bda 100644 --- a/pkg/util/log/logpb/log.proto +++ b/pkg/util/log/logpb/log.proto @@ -70,24 +70,24 @@ enum Channel { // OPS is used to report "point" operational events, // initiated by user operators or automation: // - // - Operator or system actions on server processes: process starts, - // stops, shutdowns, crashes (if they can be logged), - // including each time: command-line parameters, current version being run - // - Actions that impact the topology of a cluster: node additions, - // removals, decommissions, etc. - // - Job-related initiation or termination - // - [Cluster setting](cluster-settings.html) changes - // - [Zone configuration](configure-replication-zones.html) changes + // - Operator or system actions on server processes: process starts, + // stops, shutdowns, crashes (if they can be logged), + // including each time: command-line parameters, current version being run + // - Actions that impact the topology of a cluster: node additions, + // removals, decommissions, etc. 
+ // - Job-related initiation or termination + // - [Cluster setting](cluster-settings.html) changes + // - [Zone configuration](configure-replication-zones.html) changes OPS = 1; // HEALTH is used to report "background" operational // events, initiated by CockroachDB or reporting on automatic processes: // - // - Current resource usage, including critical resource usage - // - Node-node connection events, including connection errors and - // gossip details - // - Range and table leasing events - // - Up- and down-replication, range unavailability + // - Current resource usage, including critical resource usage + // - Node-node connection events, including connection errors and + // gossip details + // - Range and table leasing events + // - Up- and down-replication, range unavailability HEALTH = 2; // STORAGE is used to report low-level storage @@ -98,9 +98,9 @@ enum Channel { // the `server.auth_log.sql_connections.enabled` and/or // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): // - // - Connections opened/closed - // - Authentication events: logins, failed attempts - // - Session and query cancellation + // - Connections opened/closed + // - Authentication events: logins, failed attempts + // - Session and query cancellation // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -113,9 +113,9 @@ enum Channel { // // This includes: // - // - Database/schema/table/sequence/view/type creation - // - Adding/removing/changing table columns - // - Changing sequence parameters + // - Database/schema/table/sequence/view/type creation + // - Adding/removing/changing table columns + // - Changing sequence parameters // // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. @@ -124,10 +124,10 @@ enum Channel { // USER_ADMIN is used to report changes // in users and roles, including: // - // - Users added/dropped - // - Changes to authentication credentials (e.g., passwords, validity, etc.) - // - Role grants/revocations - // - Role option grants/revocations + // - Users added/dropped + // - Changes to authentication credentials (e.g., passwords, validity, etc.) + // - Role grants/revocations + // - Role option grants/revocations // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -136,8 +136,8 @@ enum Channel { // PRIVILEGES is used to report data // authorization changes, including: // - // - Privilege grants/revocations on database, objects, etc. - // - Object ownership changes + // - Privilege grants/revocations on database, objects, etc. + // - Object ownership changes // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. @@ -146,10 +146,10 @@ enum Channel { // SENSITIVE_ACCESS is used to report SQL // data access to sensitive data: // - // - Data access audit events (when table audit is enabled via - // [EXPERIMENTAL_AUDIT](experimental-audit.html)) - // - SQL statements executed by users with the admin role - // - Operations that write to system tables + // - Data access audit events (when table audit is enabled via + // [EXPERIMENTAL_AUDIT](experimental-audit.html)) + // - SQL statements executed by users with the admin role + // - Operations that write to system tables // // This is typically configured in "audit" mode, with event // numbering and synchronous writes. 
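The audit-oriented channels described here (SESSIONS, USER_ADMIN, PRIVILEGES, SENSITIVE_ACCESS) are usually routed to a dedicated file sink. Below is a minimal sketch of that routing, reusing the `file-groups`, `channels`, `auditable`, and `dir` keys from the logconfig schema shown elsewhere in this diff; the group name and directory are illustrative assumptions.

package main

import "fmt"

// auditLogConfig routes the audit-oriented channels to their own file group
// with auditable mode enabled (which, per the schema elsewhere in this diff,
// also implies exit-on-error: true). Group name and directory are examples only.
const auditLogConfig = `
sinks:
  file-groups:
    security:
      channels: [SESSIONS, USER_ADMIN, PRIVILEGES, SENSITIVE_ACCESS]
      auditable: true
      dir: audit-logs
`

func main() { fmt.Println(auditLogConfig) }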
@@ -158,9 +158,9 @@ enum Channel { // SQL_EXEC is used to report SQL execution on // behalf of client connections: // - // - Logical SQL statement executions (when enabled via the - // `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) - // - uncaught Go panic errors during the execution of a SQL statement. + // - Logical SQL statement executions (when enabled via the + // `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) + // - uncaught Go panic errors during the execution of a SQL statement. SQL_EXEC = 9; // SQL_PERF is used to report SQL executions @@ -196,10 +196,10 @@ enum Channel { } // Entry represents a cockroach log entry in the following two cases: -// - when reading a log file using the crdb-v1 format, entries -// are parsed into this struct. -// - when injecting an interceptor into the logging package, the -// interceptor is fed entries using this structure. +// - when reading a log file using the crdb-v1 format, entries +// are parsed into this struct. +// - when injecting an interceptor into the logging package, the +// interceptor is fed entries using this structure. message Entry { // Severity is the importance of the log entry. See the // documentation for the Severity enum for more details. diff --git a/pkg/util/metric/doc.go b/pkg/util/metric/doc.go index 029ecf5f2d9b..3b091857dc2e 100644 --- a/pkg/util/metric/doc.go +++ b/pkg/util/metric/doc.go @@ -22,8 +22,7 @@ while creating new rules. The exported rules are intended to be used as is or mo to set up alerts and dashboards within Prometheus by SREs (Cockroach Cloud clusters) and our customers(self-hosted CockroachDB clusters). - -Adding a new metric +# Adding a new metric First, add the metric to a Registry. @@ -48,7 +47,7 @@ updated as follows: To add the metric to the web UI, modify the appropriate file in "ui/ts/pages/*.ts". Someone more qualified than me can elaborate, like @maxlang. -Sub-registries +# Sub-registries It's common for a Registry to become part of another Registry through the "Add" and "MustAdd" methods. @@ -66,39 +65,40 @@ Node-level sub-registries are added by calling: (*metric.MetricRecorder).AddNodeRegistry(YOUR_NODE_SUBREGISTRY) -Adding a new rule +# Adding a new rule There are two types of rules: -1. AlertingRule: This rule is used to provide guidelines on how - one or metrics can be used for alerts. -2. AggregationRule: This rule is used to provide guidelines on - how one or more metrics can be aggregated to provide indicators - about system health. + 1. AlertingRule: This rule is used to provide guidelines on how + one or metrics can be used for alerts. + 2. AggregationRule: This rule is used to provide guidelines on + how one or more metrics can be aggregated to provide indicators + about system health. + Both rules use PromQL syntax for specifying the expression for the rule. The expression is validated while initializing a new rule using the Prometheus library. To export a new aggregation or alerting rule: -1. Create a new rule using the NewAlertingRule / NewAggregationRule - constructor. For example: - rule := metric.NewAlertingRule( - ruleName, - promQLExpression, - annotations, - labels, - recommendedHoldDuration, - help, - isKV, - ) - The isKV field is a boolean field and should be set to true - for all rules involving KV metrics. For alerting rules, it is highly - recommended providing runbook-like information about what steps need - to be taken to handle the alert. 
You can provide this information in - the annotations field of the AlertingRule. -2. Register the rules with the ruleRegistry parameter of the KV server - using AddRule() method within the RuleRegistry. - -Testing + 1. Create a new rule using the NewAlertingRule / NewAggregationRule + constructor. For example: + rule := metric.NewAlertingRule( + ruleName, + promQLExpression, + annotations, + labels, + recommendedHoldDuration, + help, + isKV, + ) + The isKV field is a boolean field and should be set to true + for all rules involving KV metrics. For alerting rules, it is highly + recommended providing runbook-like information about what steps need + to be taken to handle the alert. You can provide this information in + the annotations field of the AlertingRule. + 2. Register the rules with the ruleRegistry parameter of the KV server + using AddRule() method within the RuleRegistry. + +# Testing After your test does something to trigger your new metric update, you'll probably want to call methods in TestServer such as MustGetSQLCounter() to @@ -125,6 +125,5 @@ DB server locally and viewing the rules' endpoint. To view the endpoint, you can the Advanced Debug page of DB Console or as follows: $ curl http://localhost:8080/api/v2/rules/ - */ package metric diff --git a/pkg/util/metric/metric.go b/pkg/util/metric/metric.go index 98ffcabd469d..3da8def9c176 100644 --- a/pkg/util/metric/metric.go +++ b/pkg/util/metric/metric.go @@ -318,11 +318,11 @@ func (h *Histogram) Mean() float64 { // https://github.com/prometheus/prometheus/blob/d91621890a2ccb3191a6d74812cc1827dd4093bf/promql/quantile.go#L75 // This function is mostly taken from a prometheus internal function that // does the same thing. There are a few differences for our use case: -// 1. As a user of the prometheus go client library, we don't have access -// to the implicit +Inf bucket, so we don't need special cases to deal -// with the quantiles that include the +Inf bucket. -// 2. Since the prometheus client library ensures buckets are in a strictly -// increasing order at creation, we do not sort them. +// 1. As a user of the prometheus go client library, we don't have access +// to the implicit +Inf bucket, so we don't need special cases to deal +// with the quantiles that include the +Inf bucket. +// 2. Since the prometheus client library ensures buckets are in a strictly +// increasing order at creation, we do not sort them. func (h *Histogram) ValueAtQuantileWindowed(q float64) float64 { m := h.ToPrometheusMetricWindowed() diff --git a/pkg/util/metric/prometheus_exporter.go b/pkg/util/metric/prometheus_exporter.go index 036377728a83..3d6e3341f418 100644 --- a/pkg/util/metric/prometheus_exporter.go +++ b/pkg/util/metric/prometheus_exporter.go @@ -29,12 +29,13 @@ import ( // TODO(marc): we should really keep our metric objects here so we can avoid creating // new prometheus.Metric every time we are scraped. // see: https://github.com/cockroachdb/cockroach/issues/9326 -// pe := MakePrometheusExporter() -// pe.AddMetricsFromRegistry(nodeRegistry) -// pe.AddMetricsFromRegistry(storeOneRegistry) -// ... -// pe.AddMetricsFromRegistry(storeNRegistry) -// pe.Export(w) +// +// pe := MakePrometheusExporter() +// pe.AddMetricsFromRegistry(nodeRegistry) +// pe.AddMetricsFromRegistry(storeOneRegistry) +// ... 
+// pe.AddMetricsFromRegistry(storeNRegistry) +// pe.Export(w) type PrometheusExporter struct { muScrapeAndPrint syncutil.Mutex families map[string]*prometheusgo.MetricFamily diff --git a/pkg/util/mon/bytes_usage.go b/pkg/util/mon/bytes_usage.go index 55998382be19..5cb1adf18cfb 100644 --- a/pkg/util/mon/bytes_usage.go +++ b/pkg/util/mon/bytes_usage.go @@ -252,22 +252,23 @@ var DefaultPoolAllocationSize = envutil.EnvOrDefaultInt64("COCKROACH_ALLOCATION_ // NewMonitor creates a new monitor. // Arguments: -// - name is used to annotate log messages, can be used to distinguish -// monitors. // -// - resource specifies what kind of resource the monitor is tracking -// allocations for (e.g. memory or disk). +// - name is used to annotate log messages, can be used to distinguish +// monitors. // -// - curCount and maxHist are the metric objects to update with usage -// statistics. Can be nil. +// - resource specifies what kind of resource the monitor is tracking +// allocations for (e.g. memory or disk). // -// - increment is the block size used for upstream allocations from -// the pool. Note: if set to 0 or lower, the default pool allocation -// size is used. +// - curCount and maxHist are the metric objects to update with usage +// statistics. Can be nil. // -// - noteworthy determines the minimum total allocated size beyond -// which the monitor starts to log increases. Use 0 to always log -// or math.MaxInt64 to never log. +// - increment is the block size used for upstream allocations from +// the pool. Note: if set to 0 or lower, the default pool allocation +// size is used. +// +// - noteworthy determines the minimum total allocated size beyond +// which the monitor starts to log increases. Use 0 to always log +// or math.MaxInt64 to never log. func NewMonitor( name redact.RedactableString, res Resource, @@ -350,9 +351,9 @@ func (mm *BytesMonitor) StartNoReserved(ctx context.Context, pool *BytesMonitor) // Start begins a monitoring region. // Arguments: -// - pool is the upstream monitor that provision allocations exceeding the -// pre-reserved budget. If pool is nil, no upstream allocations are possible -// and the pre-reserved budget determines the entire capacity of this monitor. +// - pool is the upstream monitor that provision allocations exceeding the +// pre-reserved budget. If pool is nil, no upstream allocations are possible +// and the pre-reserved budget determines the entire capacity of this monitor. // // - reserved is the pre-reserved budget (see above). func (mm *BytesMonitor) Start(ctx context.Context, pool *BytesMonitor, reserved *BoundAccount) { diff --git a/pkg/util/pretty/document.go b/pkg/util/pretty/document.go index 0892bd5d34c4..62e4a79bddae 100644 --- a/pkg/util/pretty/document.go +++ b/pkg/util/pretty/document.go @@ -27,7 +27,6 @@ // // For example code with SQL to experiment further, refer to // https://github.com/knz/prettier/ -// package pretty import "fmt" @@ -82,8 +81,9 @@ var Line Doc = line{} // // For example, text "hello" <> softbreak <> text "world" // flattens to "helloworld" (one word) but splits across lines as: -// hello -// world +// +// hello +// world // // This is a common extension to Wadler's printer. 
// diff --git a/pkg/util/pretty/util.go b/pkg/util/pretty/util.go index 3e4843c18322..568461a6b52e 100644 --- a/pkg/util/pretty/util.go +++ b/pkg/util/pretty/util.go @@ -38,9 +38,12 @@ func JoinDoc(s Doc, d ...Doc) Doc { // For example: // aaaa // bbb -// bbb +// +// bbb +// // ccc -// ccc +// +// ccc func JoinNestedRight(sep Doc, nested ...Doc) Doc { switch len(nested) { case 0: @@ -216,16 +219,16 @@ const ( ) // Table defines a document that formats a list of pairs of items either: -// - as a 2-column table, with the two columns aligned for example: -// SELECT aaa -// bbb -// FROM ccc -// - as sections, for example: -// SELECT -// aaa -// bbb -// FROM -// ccc +// - as a 2-column table, with the two columns aligned for example: +// SELECT aaa +// bbb +// FROM ccc +// - as sections, for example: +// SELECT +// aaa +// bbb +// FROM +// ccc // // We restrict the left value in each list item to be a one-line string // to make the width computation efficient. diff --git a/pkg/util/quantile/stream.go b/pkg/util/quantile/stream.go index 7a112dd320d2..03c2bb5c7a20 100644 --- a/pkg/util/quantile/stream.go +++ b/pkg/util/quantile/stream.go @@ -25,7 +25,7 @@ // // For more detailed information about the algorithm used, see: // -// Effective Computation of Biased Quantiles over Data Streams +// # Effective Computation of Biased Quantiles over Data Streams // // http://www.cs.rutgers.edu/~muthu/bquant.pdf package quantile diff --git a/pkg/util/slidingwindow/sliding_window.go b/pkg/util/slidingwindow/sliding_window.go index 06a67fe9c500..958ed21b795f 100644 --- a/pkg/util/slidingwindow/sliding_window.go +++ b/pkg/util/slidingwindow/sliding_window.go @@ -15,11 +15,17 @@ import "time" // Swag represents a sliding window aggregator over a binary operation. // The aggregator will aggregate recorded values in two ways: // (1) Within each window, the binary operation is applied when recording a new -// value into the current window. +// +// value into the current window. +// // (2) On Query, the binary operation is accumulated over every window, from -// most to least recent. +// +// most to least recent. +// // The binary operator function must therefore be: -// associative :: binOp(binOp(a,b), c) = binOp(a,binOp(b,c)) +// +// associative :: binOp(binOp(a,b), c) = binOp(a,binOp(b,c)) +// // In order to have correct results. Note that this does not allow for a more // general class of aggregators that may be associative, such as geometric // mean, bloom filters etc. These require special treatment with user defined diff --git a/pkg/util/span/frontier.go b/pkg/util/span/frontier.go index d0265e972a0d..7afcdabb81b5 100644 --- a/pkg/util/span/frontier.go +++ b/pkg/util/span/frontier.go @@ -396,11 +396,14 @@ func (f *Frontier) Entries(fn Operation) { // 4| . h__k . // 3| . e__f . // 1 ---a----------------------m---q-- Frontier -// |___________span___________| +// +// |___________span___________| // // In the above example, frontier tracks [b, m) and the current frontier // timestamp is 1. SpanEntries for span [a-q) will invoke op with: -// ([b-c), 5), ([c-e), 1), ([e-f), 3], ([f, h], 1) ([h, k), 4), ([k, m), 1). +// +// ([b-c), 5), ([c-e), 1), ([e-f), 3], ([f, h], 1) ([h, k), 4), ([k, m), 1). +// // Note: neither [a-b) nor [m, q) will be emitted since they fall outside the spans // tracked by this frontier. 
func (f *Frontier) SpanEntries(span roachpb.Span, op Operation) { diff --git a/pkg/util/stop/stopper.go b/pkg/util/stop/stopper.go index f9dd295cb019..e388a56df282 100644 --- a/pkg/util/stop/stopper.go +++ b/pkg/util/stop/stopper.go @@ -110,48 +110,48 @@ func (f CloserFn) Close() { // A Stopper provides control over the lifecycle of goroutines started // through it via its RunTask, RunAsyncTask, and other similar methods. // -// When Stop is invoked, the Stopper +// # When Stop is invoked, the Stopper // -// - it invokes Quiesce, which causes the Stopper to refuse new work -// (that is, its Run* family of methods starts returning ErrUnavailable), -// closes the channel returned by ShouldQuiesce, and blocks until -// until no more tasks are tracked, then -// - it runs all of the methods supplied to AddCloser, then -// - closes the IsStopped channel. +// - it invokes Quiesce, which causes the Stopper to refuse new work +// (that is, its Run* family of methods starts returning ErrUnavailable), +// closes the channel returned by ShouldQuiesce, and blocks until +// until no more tasks are tracked, then +// - it runs all of the methods supplied to AddCloser, then +// - closes the IsStopped channel. // // When ErrUnavailable is returned from a task, the caller needs // to handle it appropriately by terminating any work that it had // hoped to defer to the task (which is guaranteed to never have been // invoked). A simple example of this can be seen in the below snippet: // -// var wg sync.WaitGroup -// wg.Add(1) -// if err := s.RunAsyncTask("foo", func(ctx context.Context) { -// defer wg.Done() -// }); err != nil { -// // Task never ran. -// wg.Done() -// } +// var wg sync.WaitGroup +// wg.Add(1) +// if err := s.RunAsyncTask("foo", func(ctx context.Context) { +// defer wg.Done() +// }); err != nil { +// // Task never ran. +// wg.Done() +// } // // To ensure that tasks that do get started are sensitive to Quiesce, // they need to observe the ShouldQuiesce channel similar to how they // are expected to observe context cancellation: // -// func x() { -// select { -// case <-s.ShouldQuiesce: -// return -// case <-ctx.Done(): -// return -// case <-someChan: -// // Do work. -// } -// } +// func x() { +// select { +// case <-s.ShouldQuiesce: +// return +// case <-ctx.Done(): +// return +// case <-someChan: +// // Do work. +// } +// } // // TODO(tbg): many improvements here are possible: -// - propagate quiescing via context cancellation -// - better API around refused tasks -// - all the other things mentioned in: +// - propagate quiescing via context cancellation +// - better API around refused tasks +// - all the other things mentioned in: // https://github.com/cockroachdb/cockroach/issues/58164 type Stopper struct { quiescer chan struct{} // Closed when quiescing diff --git a/pkg/util/strings.go b/pkg/util/strings.go index 9eacbc39811a..97f3ad98c9cf 100644 --- a/pkg/util/strings.go +++ b/pkg/util/strings.go @@ -96,10 +96,10 @@ type StringListBuilder struct { // MakeStringListBuilder creates a StringListBuilder, which is used to print out // lists of items. Sample usage: // -// b := MakeStringListBuilder("(", ", ", ")") -// b.Add(&buf, "x") -// b.Add(&buf, "y") -// b.Finish(&buf) // By now, we wrote "(x, y)". +// b := MakeStringListBuilder("(", ", ", ")") +// b.Add(&buf, "x") +// b.Add(&buf, "y") +// b.Finish(&buf) // By now, we wrote "(x, y)". // // If Add is not called, nothing is written. 
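The MakeStringListBuilder sample above is shown out of context; a complete, runnable version looks like the following. The import path matches the pkg/util location of strings.go in this diff, and using a bytes.Buffer as the destination simply mirrors the &buf in the comment.

package main

import (
	"bytes"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util"
)

func main() {
	var buf bytes.Buffer
	b := util.MakeStringListBuilder("(", ", ", ")")
	b.Add(&buf, "x")
	b.Add(&buf, "y")
	b.Finish(&buf)
	fmt.Println(buf.String()) // prints "(x, y)"
}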
func MakeStringListBuilder(begin, separator, end string) StringListBuilder { diff --git a/pkg/util/timetz/timetz.go b/pkg/util/timetz/timetz.go index e6b73f2ec9d7..9969649fc190 100644 --- a/pkg/util/timetz/timetz.go +++ b/pkg/util/timetz/timetz.go @@ -99,7 +99,6 @@ func Now() TimeTZ { // // The dependsOnContext return value indicates if we had to consult the given // `now` value (either for the time or the local timezone). -// func ParseTimeTZ( now time.Time, dateStyle pgdate.DateStyle, s string, precision time.Duration, ) (_ TimeTZ, dependsOnContext bool, _ error) { diff --git a/pkg/util/timeutil/pgdate/field_extract.go b/pkg/util/timeutil/pgdate/field_extract.go index 74dd841c37d0..da5e94f1a92f 100644 --- a/pkg/util/timeutil/pgdate/field_extract.go +++ b/pkg/util/timeutil/pgdate/field_extract.go @@ -682,9 +682,12 @@ func (fe *fieldExtract) MakeTimeWithoutTimezone() time.Time { // stropTimezone converts the given time to a time that looks the same but is in // UTC, e.g. from -// 2020-06-26 01:02:03 +0200 CEST +// +// 2020-06-26 01:02:03 +0200 CEST +// // to -// 2020-06-27 01:02:03 +0000 UTC. +// +// 2020-06-27 01:02:03 +0000 UTC. // // Note that the two times don't represent the same time instant. func stripTimezone(t time.Time) time.Time { diff --git a/pkg/util/timeutil/pgdate/parsing.go b/pkg/util/timeutil/pgdate/parsing.go index 56d5744fec90..0a081b9f2555 100644 --- a/pkg/util/timeutil/pgdate/parsing.go +++ b/pkg/util/timeutil/pgdate/parsing.go @@ -85,13 +85,12 @@ var ( // ParseDate converts a string into Date. // // Any specified timezone is inconsequential. Examples: -// - "now": parses to the local date (in the current timezone) -// - "2020-06-26 01:09:15.511971": parses to '2020-06-26' -// - "2020-06-26 01:09:15.511971-05": parses to '2020-06-26' +// - "now": parses to the local date (in the current timezone) +// - "2020-06-26 01:09:15.511971": parses to '2020-06-26' +// - "2020-06-26 01:09:15.511971-05": parses to '2020-06-26' // // The dependsOnContext return value indicates if we had to consult the given // `now` value (either for the time or the local timezone). -// func ParseDate( now time.Time, dateStyle DateStyle, s string, ) (_ Date, dependsOnContext bool, _ error) { @@ -148,8 +147,8 @@ func ParseTime( // location. // // Any specified timezone is inconsequential. Examples: -// - "now": parses to the local time of day (in the current timezone) -// - "01:09:15.511971" and "01:09:15.511971-05" parse to the same result +// - "now": parses to the local time of day (in the current timezone) +// - "01:09:15.511971" and "01:09:15.511971-05" parse to the same result // // The dependsOnContext return value indicates if we had to consult the given // `now` value (either for the time or the local timezone). 
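A short sketch of calling ParseDate with the signature shown above. Passing the zero-value DateStyle is assumed to select the default parsing style; that assumption and the standalone-program shape are the only things added beyond the documented behavior, and the import path follows the pkg/util/timeutil/pgdate location in this diff.

package main

import (
	"fmt"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate"
)

func main() {
	now := time.Now()
	// Zero-value DateStyle is assumed to mean the default style.
	d, dependsOnContext, err := pgdate.ParseDate(
		now, pgdate.DateStyle{}, "2020-06-26 01:09:15.511971-05")
	if err != nil {
		panic(err)
	}
	// Per the examples above, the timezone is inconsequential: the date is 2020-06-26.
	fmt.Println(d, dependsOnContext)
}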
diff --git a/pkg/util/timeutil/pgdate/parsing_test.go b/pkg/util/timeutil/pgdate/parsing_test.go index b5f39dc54813..914c8191be3d 100644 --- a/pkg/util/timeutil/pgdate/parsing_test.go +++ b/pkg/util/timeutil/pgdate/parsing_test.go @@ -749,16 +749,17 @@ func TestMain(m *testing.M) { // TestParse does the following: // * For each parsing order: -// * Pick an example date input: 2018-01-01 -// * Test ParseDate() -// * Pick an example time input: 12:34:56 -// * Derive a timestamp from date + time -// * Test ParseTimestame() -// * Test ParseDate() -// * Test ParseTime() -// * Test one-off timestamp formats +// - Pick an example date input: 2018-01-01 +// - Test ParseDate() +// - Pick an example time input: 12:34:56 +// - Derive a timestamp from date + time +// - Test ParseTimestame() +// - Test ParseDate() +// - Test ParseTime() +// - Test one-off timestamp formats +// // * Pick an example time input: -// * Test ParseTime() +// - Test ParseTime() func TestParse(t *testing.T) { for _, order := range []pgdate.Order{ pgdate.Order_YMD, diff --git a/pkg/util/timeutil/time_zone_util.go b/pkg/util/timeutil/time_zone_util.go index e4a9a580ed38..c93b6bb3a922 100644 --- a/pkg/util/timeutil/time_zone_util.go +++ b/pkg/util/timeutil/time_zone_util.go @@ -204,9 +204,10 @@ func timeZoneOffsetStringConversion( } // The timestamp must be of one of the following formats: -// HH -// HH:MM -// HH:MM:SS +// +// HH +// HH:MM +// HH:MM:SS func hoursMinutesSecondsToSeconds(timeString string) int { var ( hoursString = "0" @@ -230,9 +231,10 @@ func hoursMinutesSecondsToSeconds(timeString string) int { } // secondsToHoursMinutesSeconds converts seconds to a timestamp of the format -// HH -// HH:MM -// HH:MM:SS +// +// HH +// HH:MM +// HH:MM:SS func secondsToHoursMinutesSeconds(totalSeconds int) string { secondsPerHour := 60 * 60 secondsPerMinute := 60 @@ -257,9 +259,10 @@ func secondsToHoursMinutesSeconds(totalSeconds int) string { // The minutes and seconds sections are only included in the precision is // necessary. // For example: -// 11.00 -> 11 -// 11.5 -> 11:30 -// 11.51 -> 11:30:36 +// +// 11.00 -> 11 +// 11.5 -> 11:30 +// 11.51 -> 11:30:36 func floatToHoursMinutesSeconds(f float64) string { hours := int(f) remaining := f - float64(hours) diff --git a/pkg/util/timeutil/timer.go b/pkg/util/timeutil/timer.go index 41934174f9b3..a090cd5d2d89 100644 --- a/pkg/util/timeutil/timer.go +++ b/pkg/util/timeutil/timer.go @@ -35,16 +35,16 @@ var timeTimerPool sync.Pool // channel is read from, the next call to Timer.Reset will deadlock. // This pattern looks something like: // -// var timer timeutil.Timer -// defer timer.Stop() -// for { -// timer.Reset(wait) -// select { -// case <-timer.C: -// timer.Read = true -// ... -// } -// } +// var timer timeutil.Timer +// defer timer.Stop() +// for { +// timer.Reset(wait) +// select { +// case <-timer.C: +// timer.Read = true +// ... 
+// } +// } // // Note that unlike the standard library's Timer type, this Timer will // not begin counting down until Reset is called for the first time, as diff --git a/pkg/util/tracing/collector/collector_test.go b/pkg/util/tracing/collector/collector_test.go index 7427e685a297..14b9a90a379c 100644 --- a/pkg/util/tracing/collector/collector_test.go +++ b/pkg/util/tracing/collector/collector_test.go @@ -54,7 +54,9 @@ func newTestStructured(i string) *testStructuredImpl { // Trace for t1: // ------------- // root <-- traceID1 -// root.child <-- traceID1 +// +// root.child <-- traceID1 +// // root2.child.remotechild <-- traceID2 // root2.child.remotechild2 <-- traceID2 // @@ -63,7 +65,8 @@ func newTestStructured(i string) *testStructuredImpl { // root.child.remotechild <-- traceID1 // root.child.remotechilddone <-- traceID1 // root2 <-- traceID2 -// root2.child <-- traceID2 +// +// root2.child <-- traceID2 func setupTraces(t1, t2 *tracing.Tracer) (tracingpb.TraceID, tracingpb.TraceID, func()) { // Start a root span on "node 1". root := t1.StartSpan("root", tracing.WithRecording(tracingpb.RecordingVerbose)) diff --git a/pkg/util/tracing/context.go b/pkg/util/tracing/context.go index a7ac1e6df7c4..6ae85b909123 100644 --- a/pkg/util/tracing/context.go +++ b/pkg/util/tracing/context.go @@ -30,9 +30,9 @@ func SpanFromContext(ctx context.Context) *Span { } // maybeWrapCtx returns a Context wrapping the Span, with two exceptions: -// 1. if ctx==noCtx, it's a noop -// 2. if ctx contains the noop Span, and sp is also the noop Span, elide -// allocating a new Context. +// 1. if ctx==noCtx, it's a noop +// 2. if ctx contains the noop Span, and sp is also the noop Span, elide +// allocating a new Context. // // NOTE(andrei): Our detection of Span use-after-Finish() is not reliable // because spans are reused through a sync.Pool; we fail to detect someone diff --git a/pkg/util/tracing/doc.go b/pkg/util/tracing/doc.go index 0e5aa4c9fa65..0de760e650fc 100644 --- a/pkg/util/tracing/doc.go +++ b/pkg/util/tracing/doc.go @@ -16,44 +16,41 @@ // // 1. The Data Model // -// [Span A] <--- (root span) -// | -// +------+------+ -// | | -// [Span B] [Span C] <--- (C is a "child of" A) -// | | -// [Span D] +---+-------+ -// | | -// [Span E] [Span F] >>> [Span G] <--- (G "follows from" F) -// +// [Span A] <--- (root span) +// | +// +------+------+ +// | | +// [Span B] [Span C] <--- (C is a "child of" A) +// | | +// [Span D] +---+-------+ +// | | +// [Span E] [Span F] >>> [Span G] <--- (G "follows from" F) // // Traces are defined implicitly by their Spans. A Trace can be thought of a // directed acyclic graph of Spans, where edges between Spans indicate that // they're causally related. 
An alternate (and usually the more useful) // rendering[3] of traces is a temporal one: // +// ––|–––––––|–––––––|–––––––|–––––––|–––––––|–––––––|–––––––|–> time // -// ––|–––––––|–––––––|–––––––|–––––––|–––––––|–––––––|–––––––|–> time -// -// [Span A···················································] -// [Span B··············································] -// [Span D··········································] -// [Span C········································] -// [Span E·······] [Span F··] [Span G··] -// +// [Span A···················································] +// [Span B··············································] +// [Span D··········································] +// [Span C········································] +// [Span E·······] [Span F··] [Span G··] // // The causal relation between spans can be one of two types: -// - Parent-child relation: Typically used when the parent span depends on the -// result of the child span (during an RPC call, the -// client-side span would be the parent of the -// server-side span). See [4]. -// - Follows-from relation: Typically used when the first span does not in any -// way depend on the result of the second (think of a -// goroutine that spins off another that outlives it). -// Note that we still refer to the "first" and "second" -// span as being the "parent" and "child" respectively -// (they're still nodes in the DAG, just with a -// different kind of edge between them)[5]. +// - Parent-child relation: Typically used when the parent span depends on the +// result of the child span (during an RPC call, the +// client-side span would be the parent of the +// server-side span). See [4]. +// - Follows-from relation: Typically used when the first span does not in any +// way depend on the result of the second (think of a +// goroutine that spins off another that outlives it). +// Note that we still refer to the "first" and "second" +// span as being the "parent" and "child" respectively +// (they're still nodes in the DAG, just with a +// different kind of edge between them)[5]. // // Each Span[6] is logically comprised of the following: // - An operation name @@ -90,11 +87,15 @@ // [3]: `Recording.String` // [4]: `ChildSpan` // [5]: `ForkSpan`. "forking" a Span is the same as creating a new one -// with a "follows from" relation. +// +// with a "follows from" relation. +// // [6]: `crdbSpan` // [7]: `Span.SetVerbose`. To understand the specifics of what exactly is -// captured in Span recording, when Spans have children that may be either -// local or remote, look towards `WithParent` and `WithDetachedRecording`. +// +// captured in Span recording, when Spans have children that may be either +// local or remote, look towards `WithParent` and `WithDetachedRecording`. 
+// // [8]: `Tracer.{InjectMetaInto,ExtractMetaFrom}` // [9]: `SpanMeta` // [10]: `{Client,Server}Interceptor` diff --git a/pkg/util/tracing/grpcinterceptor/grpc_interceptor.go b/pkg/util/tracing/grpcinterceptor/grpc_interceptor.go index 0804cba5674c..9c63796edde1 100644 --- a/pkg/util/tracing/grpcinterceptor/grpc_interceptor.go +++ b/pkg/util/tracing/grpcinterceptor/grpc_interceptor.go @@ -74,9 +74,9 @@ func methodExcludedFromTracing(method string) bool { // // For example: // -// s := grpcutil.NewServer( -// ..., // (existing ServerOptions) -// grpc.UnaryInterceptor(ServerInterceptor(tracer))) +// s := grpcutil.NewServer( +// ..., // (existing ServerOptions) +// grpc.UnaryInterceptor(ServerInterceptor(tracer))) // // All gRPC server spans will look for an tracing SpanMeta in the gRPC // metadata; if found, the server span will act as the ChildOf that RPC @@ -126,9 +126,9 @@ func ServerInterceptor(tracer *tracing.Tracer) grpc.UnaryServerInterceptor { // // For example: // -// s := grpcutil.NewServer( -// ..., // (existing ServerOptions) -// grpc.StreamInterceptor(StreamServerInterceptor(tracer))) +// s := grpcutil.NewServer( +// ..., // (existing ServerOptions) +// grpc.StreamInterceptor(StreamServerInterceptor(tracer))) // // All gRPC server spans will look for a SpanMeta in the gRPC // metadata; if found, the server span will act as the ChildOf that RPC @@ -196,10 +196,10 @@ func injectSpanMeta( // // For example: // -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithUnaryInterceptor(ClientInterceptor(tracer))) +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithUnaryInterceptor(ClientInterceptor(tracer))) // // All gRPC client spans will inject the tracing SpanMeta into the gRPC // metadata; they will also look in the context.Context for an active @@ -260,10 +260,10 @@ func ClientInterceptor( // // For example: // -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithStreamInterceptor(StreamClientInterceptor(tracer))) +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithStreamInterceptor(StreamClientInterceptor(tracer))) // // All gRPC client spans will inject the tracing SpanMeta into the gRPC // metadata; they will also look in the context.Context for an active diff --git a/pkg/util/tracing/span.go b/pkg/util/tracing/span.go index 0eb66fe4535e..5dddd76aea64 100644 --- a/pkg/util/tracing/span.go +++ b/pkg/util/tracing/span.go @@ -321,9 +321,9 @@ func (sp *Span) FinishAndGetConfiguredRecording() tracingpb.Recording { // // A few internal tags are added to denote span properties: // -// "_unfinished" The span was never Finish()ed -// "_verbose" The span is a verbose one -// "_dropped" The span dropped recordings due to sizing constraints +// "_unfinished" The span was never Finish()ed +// "_verbose" The span is a verbose one +// "_dropped" The span dropped recordings due to sizing constraints // // If recType is RecordingStructured, the return value will be nil if the span // doesn't have any structured events. @@ -474,14 +474,14 @@ type LazyTag interface { // SetLazyTag adds a tag to the span. The tag's value is expected to implement // either fmt.Stringer or LazyTag, and is only stringified using one of // the two on demand: -// - if the Span has an otel span or a net.Trace, the tag -// is stringified immediately and passed to the external trace (see -// SetLazyStatusTag if you want to avoid that). 
-//- if/when the span's recording is collected, the tag is stringified on demand. -// If the recording is collected multiple times, the tag is stringified -// multiple times (so, the tag can evolve over time). Since generally the -// collection of a recording can happen asynchronously, the implementation of -// Stringer or LazyTag should be thread-safe. +// - if the Span has an otel span or a net.Trace, the tag +// is stringified immediately and passed to the external trace (see +// SetLazyStatusTag if you want to avoid that). +// - if/when the span's recording is collected, the tag is stringified on demand. +// If the recording is collected multiple times, the tag is stringified +// multiple times (so, the tag can evolve over time). Since generally the +// collection of a recording can happen asynchronously, the implementation of +// Stringer or LazyTag should be thread-safe. func (sp *Span) SetLazyTag(key string, value interface{}) { if sp.detectUseAfterFinish() { return diff --git a/pkg/util/tracing/span_options.go b/pkg/util/tracing/span_options.go index 9b0a74f44561..a171c6cbfb9e 100644 --- a/pkg/util/tracing/span_options.go +++ b/pkg/util/tracing/span_options.go @@ -157,12 +157,12 @@ type parentOption spanRef // WithParent will be a no-op (i.e. the span resulting from // applying this option will be a root span, just as if this option hadn't been // specified) in the following cases: -// - if `sp` is nil -// - if `sp` is a no-op span -// - if `sp` is a sterile span (i.e. a span explicitly marked as not wanting -// children). Note that the singleton Tracer.noop span is marked as sterile, -// which makes this condition mostly encompass the previous one, however in -// theory there could be no-op spans other than the singleton one. +// - if `sp` is nil +// - if `sp` is a no-op span +// - if `sp` is a sterile span (i.e. a span explicitly marked as not wanting +// children). Note that the singleton Tracer.noop span is marked as sterile, +// which makes this condition mostly encompass the previous one, however in +// theory there could be no-op spans other than the singleton one. // // The child inherits the parent's log tags. The data collected in the // child trace will be retrieved automatically when the parent's data is @@ -241,7 +241,9 @@ type remoteParent SpanMeta // node 1 (network) node 2 // -------------------------------------------------------------------------- // Span.Meta() ----------> sp2 := Tracer.StartSpan(WithRemoteParentFromSpanMeta(.)) -// doSomething(sp2) +// +// doSomething(sp2) +// // Span.ImportRemoteRecording(.) <---------- sp2.FinishAndGetRecording() // // By default, the child span is derived using a ChildOf relationship, which diff --git a/pkg/util/tracing/test_utils.go b/pkg/util/tracing/test_utils.go index 66f11c9be411..8bbf8edfb26b 100644 --- a/pkg/util/tracing/test_utils.go +++ b/pkg/util/tracing/test_utils.go @@ -58,15 +58,15 @@ func CountLogMessages(sp tracingpb.RecordedSpan, msg string) int { // one represented by a string with one line per expected span and one line per // expected event (i.e. log message), with a tab-indentation for child spans. 
// -// if err := CheckRecordedSpans(Span.GetRecording(), ` -// span: root -// event: a -// span: child -// event: [ambient] b -// event: c -// `); err != nil { -// t.Fatal(err) -// } +// if err := CheckRecordedSpans(Span.GetRecording(), ` +// span: root +// event: a +// span: child +// event: [ambient] b +// event: c +// `); err != nil { +// t.Fatal(err) +// } // // The event lines can (and generally should) omit the file:line part that they // might contain (depending on the level at which they were logged). @@ -164,16 +164,15 @@ func CheckRecordedSpans(rec tracingpb.Recording, expected string) error { // one. The expected string is allowed to elide timing information, and the // outer-most indentation level is adjusted for when comparing. // -// if err := CheckRecording(sp.GetRecording(), ` -// === operation:root -// [childrenMetadata] -// event:root 1 -// === operation:remote child -// event:remote child 1 -// `); err != nil { -// t.Fatal(err) -// } -// +// if err := CheckRecording(sp.GetRecording(), ` +// === operation:root +// [childrenMetadata] +// event:root 1 +// === operation:remote child +// event:remote child 1 +// `); err != nil { +// t.Fatal(err) +// } func CheckRecording(rec tracingpb.Recording, expected string) error { normalize := func(rec string) string { // normalize the string form of a recording for ease of comparison. diff --git a/pkg/util/tracing/tracer.go b/pkg/util/tracing/tracer.go index 93e5d771bef4..cdb0562704ad 100644 --- a/pkg/util/tracing/tracer.go +++ b/pkg/util/tracing/tracer.go @@ -248,12 +248,12 @@ var spanReusePercent = util.ConstantWithMetamorphicTestRange( // Tracer implements tracing requests. It supports: // -// - forwarding events to x/net/trace instances +// - forwarding events to x/net/trace instances // -// - recording traces. Recorded events can be retrieved at any time. +// - recording traces. Recorded events can be retrieved at any time. // -// - OpenTelemetry tracing. This is implemented by maintaining a "shadow" -// OpenTelemetry Span inside each of our spans. +// - OpenTelemetry tracing. This is implemented by maintaining a "shadow" +// OpenTelemetry Span inside each of our spans. // // Even when tracing is disabled, we still use this Tracer (with x/net/trace and // lightstep disabled) because of its recording capability (verbose tracing needs @@ -950,7 +950,7 @@ type spanAllocHelper struct { // newSpan allocates a span using the Tracer's sync.Pool. A span that was // previously Finish()ed be returned if the Tracer is configured for Span reuse. -//+(...) must be called on the returned span before further use. +// +(...) must be called on the returned span before further use. func (t *Tracer) newSpan( traceID tracingpb.TraceID, spanID tracingpb.SpanID, @@ -1488,7 +1488,8 @@ func (t *Tracer) TestingGetStatsAndReset() (int, int) { // // [1]: Looking towards the provided context to see if one exists. // [2]: Unless configured differently by tests, see -// TestingRecordAsyncSpans. +// +// TestingRecordAsyncSpans. func ForkSpan(ctx context.Context, opName string) (context.Context, *Span) { sp := SpanFromContext(ctx) if sp == nil { diff --git a/pkg/util/treeprinter/tree_printer.go b/pkg/util/treeprinter/tree_printer.go index d4496b9729a9..c2218fa018fd 100644 --- a/pkg/util/treeprinter/tree_printer.go +++ b/pkg/util/treeprinter/tree_printer.go @@ -34,22 +34,22 @@ type Node struct { // New creates a tree printer and returns a sentinel node reference which // should be used to add the root. 
Sample usage: // -// tp := New() -// root := tp.Child("root") -// root.Child("child-1") -// root.Child("child-2").Child("grandchild\ngrandchild-more-info") -// root.Child("child-3") +// tp := New() +// root := tp.Child("root") +// root.Child("child-1") +// root.Child("child-2").Child("grandchild\ngrandchild-more-info") +// root.Child("child-3") // -// fmt.Print(tp.String()) +// fmt.Print(tp.String()) // // Output: // -// root -// ├── child-1 -// ├── child-2 -// │ └── grandchild -// │ grandchild-more-info -// └── child-3 +// root +// ├── child-1 +// ├── child-2 +// │ └── grandchild +// │ grandchild-more-info +// └── child-3 // // Note that the Child calls can't be rearranged arbitrarily; they have // to be in the order they need to be displayed (depth-first pre-order). @@ -133,41 +133,41 @@ const ( // When new nodes are added, some of the characters of the previous formatted // tree need to be updated. Here is an example stepping through the state: // -// API call Rows +// API call Rows // // -// tp := New() +// tp := New() // // -// root := tp.Child("root") root +// root := tp.Child("root") root // // -// root.Child("child-1") root -// └── child-1 +// root.Child("child-1") root +// └── child-1 // // -// c2 := root.Child("child-2") root -// ├── child-1 -// └── child-2 +// c2 := root.Child("child-2") root +// ├── child-1 +// └── child-2 // -// Note: here we had to go back up and change └─ into ├─ for child-1. +// Note: here we had to go back up and change └─ into ├─ for child-1. // // -// c2.Child("grandchild") root -// ├── child-1 -// └── child-2 -// └── grandchild +// c2.Child("grandchild") root +// ├── child-1 +// └── child-2 +// └── grandchild // // -// root.Child("child-3" root -// ├── child-1 -// ├── child-2 -// │ └── grandchild -// └── child-3 +// root.Child("child-3" root +// ├── child-1 +// ├── child-2 +// │ └── grandchild +// └── child-3 // -// Note: here we had to go back up and change └─ into ├─ for child-2, and -// add a │ on the grandchild row. In general, we may need to add an -// arbitrary number of vertical bars. +// Note: here we had to go back up and change └─ into ├─ for child-2, and +// add a │ on the grandchild row. In general, we may need to add an +// arbitrary number of vertical bars. // // In order to perform these character changes, we maintain information about // the nodes on the bottom-most path. diff --git a/pkg/util/ulid/ulid.go b/pkg/util/ulid/ulid.go index 832fd2432d27..ad691785cf08 100644 --- a/pkg/util/ulid/ulid.go +++ b/pkg/util/ulid/ulid.go @@ -407,7 +407,8 @@ func MaxTime() uint64 { return maxTime } // Now is a convenience function that returns the current // UTC time in Unix milliseconds. Equivalent to: -// Timestamp(timeutil.Now().UTC()) +// +// Timestamp(timeutil.Now().UTC()) func Now() uint64 { return Timestamp(timeutil.Now().UTC()) } // Timestamp converts a time.Time to Unix milliseconds. @@ -488,33 +489,32 @@ func (id *ULID) Scan(src interface{}) error { // representation instead, you can create a wrapper type that calls String() // instead. // -// type stringValuer ulid.ULID +// type stringValuer ulid.ULID // -// func (v stringValuer) Value() (driver.Value, error) { -// return ulid.ULID(v).String(), nil -// } +// func (v stringValuer) Value() (driver.Value, error) { +// return ulid.ULID(v).String(), nil +// } // -// // Example usage. -// db.Exec("...", stringValuer(id)) +// // Example usage. +// db.Exec("...", stringValuer(id)) // // All valid ULIDs, including zero-value ULIDs, return a valid Value with a nil // error. 
If your use case requires zero-value ULIDs to return a non-nil error, // you can create a wrapper type that special-cases this behavior. // -// var zeroValueULID ulid.ULID -// -// type invalidZeroValuer ulid.ULID +// var zeroValueULID ulid.ULID // -// func (v invalidZeroValuer) Value() (driver.Value, error) { -// if ulid.ULID(v).Compare(zeroValueULID) == 0 { -// return nil, fmt.Errorf("zero value") -// } -// return ulid.ULID(v).Value() -// } +// type invalidZeroValuer ulid.ULID // -// // Example usage. -// db.Exec("...", invalidZeroValuer(id)) +// func (v invalidZeroValuer) Value() (driver.Value, error) { +// if ulid.ULID(v).Compare(zeroValueULID) == 0 { +// return nil, fmt.Errorf("zero value") +// } +// return ulid.ULID(v).Value() +// } // +// // Example usage. +// db.Exec("...", invalidZeroValuer(id)) func (id ULID) Value() (driver.Value, error) { return id.MarshalBinary() } diff --git a/pkg/util/uuid/codec.go b/pkg/util/uuid/codec.go index ee73f118fd69..2aeae2751fa7 100644 --- a/pkg/util/uuid/codec.go +++ b/pkg/util/uuid/codec.go @@ -67,41 +67,40 @@ func (u UUID) MarshalText() ([]byte, error) { // UnmarshalText implements the encoding.TextUnmarshaler interface. // Following formats are supported: // -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// "{6ba7b8109dad11d180b400c04fd430c8}", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "6ba7-b810-9dad-11d1-80b4-00c0-4fd4-30c8" +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "6ba7-b810-9dad-11d1-80b4-00c0-4fd4-30c8" // // ABNF for supported UUID text representation follows: // -// URN := 'urn' -// UUID-NID := 'uuid' +// URN := 'urn' +// UUID-NID := 'uuid' // -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' // -// hexoct := hexdig hexdig -// 2hexoct := hexoct hexoct -// 4hexoct := 2hexoct 2hexoct -// 6hexoct := 4hexoct 2hexoct -// 12hexoct := 6hexoct 6hexoct +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct // -// hashlike := 12hexoct -// hyphenated := hyphen after any group of 4 hexdig -// Ex.6ba7-b810-9dad-11d1-80b4-00c0-4fd4-30c8 -// Ex.6ba7-b810-9dad11d1-80b400c0-4fd4-30c8 +// hashlike := 12hexoct +// hyphenated := hyphen after any group of 4 hexdig +// Ex.6ba7-b810-9dad-11d1-80b4-00c0-4fd4-30c8 +// Ex.6ba7-b810-9dad11d1-80b400c0-4fd4-30c8 // -// uuid := hyphenated | hashlike | braced | urn -// -// braced := '{' hyphenated '}' | '{' hashlike '}' -// urn := URN ':' UUID-NID ':' hyphenated +// uuid := hyphenated | hashlike | braced | urn // +// braced := '{' hyphenated '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' hyphenated func (u *UUID) UnmarshalText(text []byte) error { l := len(text) stringifiedText := string(text) @@ 
-118,7 +117,8 @@ func (u *UUID) UnmarshalText(text []byte) error { } // decodeHashLike decodes UUID strings that are using the following format: -// "6ba7b8109dad11d180b400c04fd430c8". +// +// "6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodeHashLike(t []byte) error { src := t[:] dst := u[:] @@ -128,8 +128,9 @@ func (u *UUID) decodeHashLike(t []byte) error { } // decodeHyphenated decodes UUID strings that are using the following format: -// "6ba7-b810-9dad-11d1-80b4-00c0-4fd4-30c8" -// "6ba7b810-9dad-11d1-80b400c0-4fd4-30c8" +// +// "6ba7-b810-9dad-11d1-80b4-00c0-4fd4-30c8" +// "6ba7b810-9dad-11d1-80b400c0-4fd4-30c8" func (u *UUID) decodeHyphenated(t []byte) error { l := len(t) if l < 32 || l > 40 { diff --git a/pkg/util/uuid/fuzz.go b/pkg/util/uuid/fuzz.go index 4d8d60465372..5777372e0225 100644 --- a/pkg/util/uuid/fuzz.go +++ b/pkg/util/uuid/fuzz.go @@ -23,15 +23,15 @@ package uuid // // To run: // -// $ go get github.com/dvyukov/go-fuzz/... -// $ cd $GOPATH/src/github.com/gofrs/uuid -// $ go-fuzz-build github.com/gofrs/uuid -// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// $ go get github.com/dvyukov/go-fuzz/... +// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata // // If you make significant changes to FromString / UnmarshalText and add // new cases to fromStringTests (in codec_test.go), please run // -// $ go test -seed_fuzz_corpus +// $ go test -seed_fuzz_corpus // // to seed the corpus with the new interesting inputs, then run the fuzzer. func Fuzz(data []byte) int { diff --git a/pkg/util/uuid/uuid.go b/pkg/util/uuid/uuid.go index e16c2b7b9060..f161a6b72b87 100644 --- a/pkg/util/uuid/uuid.go +++ b/pkg/util/uuid/uuid.go @@ -188,7 +188,8 @@ func (u *UUID) SetVariant(v byte) { // Must is a helper that wraps a call to a function returning (UUID, error) // and panics if the error is non-nil. It is intended for use in variable // initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +// +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) func Must(u UUID, err error) UUID { if err != nil { panic(err) diff --git a/pkg/util/version/version.go b/pkg/util/version/version.go index cee0a24e8981..f34c40326957 100644 --- a/pkg/util/version/version.go +++ b/pkg/util/version/version.go @@ -55,7 +55,8 @@ func (v *Version) Metadata() string { } // String returns the string representation, in the format: -// "v1.2.3-beta+md" +// +// "v1.2.3-beta+md" func (v Version) String() string { var b strings.Builder fmt.Fprintf(&b, "v%d.%d.%d", v.major, v.minor, v.patch) @@ -81,7 +82,9 @@ var numericRE = regexp.MustCompile(`^(0|[1-9][0-9]*)$`) // Parse creates a version from a string. The string must be a valid semantic // version (as per https://semver.org/spec/v2.0.0.html) in the format: -// "vMINOR.MAJOR.PATCH[-PRERELEASE][+METADATA]". +// +// "vMINOR.MAJOR.PATCH[-PRERELEASE][+METADATA]". +// // MINOR, MAJOR, and PATCH are numeric values (without any leading 0s). // PRERELEASE and METADATA can contain ASCII characters and digits, hyphens and // dots. diff --git a/pkg/workload/bulkingest/bulkingest.go b/pkg/workload/bulkingest/bulkingest.go index 3d6857aa88a7..08095d801b2d 100644 --- a/pkg/workload/bulkingest/bulkingest.go +++ b/pkg/workload/bulkingest/bulkingest.go @@ -42,7 +42,6 @@ The workload's main parameters are number of distinct values of a, b and c. 
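For readers of the pkg/util/version hunk above, a minimal usage sketch of the documented "v<major>.<minor>.<patch>[-PRERELEASE][+METADATA]" format (illustration only, not part of the patch); the import path and the exact Parse signature are assumptions inferred from the file being edited, not verified here.

package main

import (
	"fmt"

	// Assumed import path for the package edited in the hunk above.
	"github.com/cockroachdb/cockroach/pkg/util/version"
)

func main() {
	// A full version string with a prerelease and metadata suffix.
	v, err := version.Parse("v22.1.5-beta.2+md")
	if err != nil {
		panic(err)
	}
	// String() is expected to render the same "vX.Y.Z-pre+md" form
	// described in the doc comment above.
	fmt.Println(v.String())

	// Numeric components may not carry leading zeros (see numericRE in the
	// hunk above), so this parse is expected to fail.
	if _, err := version.Parse("v1.02.3"); err != nil {
		fmt.Println("rejected:", err)
	}
}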
Initial data batches each correspond to one a/b pair containing c rows. By default, batches are ordered by a then b (a=1/b=1, a=1/b=2, a=1,b=3, ...) though this can optionally be inverted (a=1/b=1, a=2,b=1, a=3,b=1,...). - */ package bulkingest diff --git a/pkg/workload/querylog/querylog.go b/pkg/workload/querylog/querylog.go index 12d6d52df9bd..2ead08dfd7e2 100644 --- a/pkg/workload/querylog/querylog.go +++ b/pkg/workload/querylog/querylog.go @@ -17,7 +17,6 @@ import ( gosql "database/sql" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -600,7 +599,7 @@ const ( // parseFile parses the file one line at a time. First queryLogHeaderLines are // skipped, and all other lines are expected to match the pattern of re. -func (w *querylog) parseFile(ctx context.Context, fileInfo os.FileInfo, re *regexp.Regexp) error { +func (w *querylog) parseFile(ctx context.Context, fileInfo os.DirEntry, re *regexp.Regexp) error { start := timeutil.Now() file, err := os.Open(w.dirPath + "/" + fileInfo.Name()) if err != nil { @@ -682,7 +681,7 @@ func (w *querylog) processQueryLog(ctx context.Context) error { log.Infof(ctx, "Unzipping to %s is complete", w.dirPath) } - files, err := ioutil.ReadDir(w.dirPath) + files, err := os.ReadDir(w.dirPath) if err != nil { return err } diff --git a/pkg/workload/sql_runner.go b/pkg/workload/sql_runner.go index 89e5e65df216..22980e901ace 100644 --- a/pkg/workload/sql_runner.go +++ b/pkg/workload/sql_runner.go @@ -27,19 +27,20 @@ import ( // must be initialized, after which we can use the handles returned by Define. // // Sample usage: -// sr := &workload.SQLRunner{} // -// sel:= sr.Define("SELECT x FROM t WHERE y = $1") -// ins:= sr.Define("INSERT INTO t(x, y) VALUES ($1, $2)") +// sr := &workload.SQLRunner{} // -// err := sr.Init(ctx, conn, flags) -// // [handle err] +// sel:= sr.Define("SELECT x FROM t WHERE y = $1") +// ins:= sr.Define("INSERT INTO t(x, y) VALUES ($1, $2)") // -// row := sel.QueryRow(1) -// // [use row] +// err := sr.Init(ctx, conn, flags) +// // [handle err] // -// _, err := ins.Exec(5, 6) -// // [handle err] +// row := sel.QueryRow(1) +// // [use row] +// +// _, err := ins.Exec(5, 6) +// // [handle err] // // A runner should typically be associated with a single worker. type SQLRunner struct { @@ -85,23 +86,22 @@ func (sr *SQLRunner) Define(sql string) StmtHandle { // // The way we issue queries is set by flags.Method: // -// - "prepare": explicitly prepare the query once per connection, then we reuse -// it for each execution. This results in a Bind and Execute on the server -// each time we run a query (on the given connection). Note that it's -// important to prepare on separate connections if there are many parallel -// workers; this avoids lock contention in the sql.Rows objects they produce. -// See #30811. -// -// - "noprepare": each query is issued separately (on the given connection). -// This results in Parse, Bind, Execute on the server each time we run a -// query. The statement is an anonymous prepared statement; that is, the -// name is the empty string. +// - "prepare": explicitly prepare the query once per connection, then we reuse +// it for each execution. This results in a Bind and Execute on the server +// each time we run a query (on the given connection). Note that it's +// important to prepare on separate connections if there are many parallel +// workers; this avoids lock contention in the sql.Rows objects they produce. +// See #30811. 
// -// - "simple": each query is issued in a single string; parameters are -// rendered inside the string. This results in a single SimpleExecute -// request to the server for each query. Note that only a few parameter types -// are supported. +// - "noprepare": each query is issued separately (on the given connection). +// This results in Parse, Bind, Execute on the server each time we run a +// query. The statement is an anonymous prepared statement; that is, the +// name is the empty string. // +// - "simple": each query is issued in a single string; parameters are +// rendered inside the string. This results in a single SimpleExecute +// request to the server for each query. Note that only a few parameter types +// are supported. func (sr *SQLRunner) Init( ctx context.Context, name string, mcp *MultiConnPool, flags *ConnFlags, ) error { diff --git a/pkg/workload/tpcc/result.go b/pkg/workload/tpcc/result.go index 1432b1c4a443..3a72be61406e 100644 --- a/pkg/workload/tpcc/result.go +++ b/pkg/workload/tpcc/result.go @@ -31,12 +31,11 @@ const SpecWarehouseFactor = 12.86 // The 12.605 is computed from the operation mix and the number of secs // it takes to cycle through a deck: // -// 10*(18+12) + 10*(3+12) + 1*(2+10) + 1*(2+5) + 1*(2+5) = 476 +// 10*(18+12) + 10*(3+12) + 1*(2+10) + 1*(2+5) + 1*(2+5) = 476 // // 10 workers per warehouse times 10 newOrder ops per deck results in: // -// (10*10)/(476/60) = 12.605... -// +// (10*10)/(476/60) = 12.605... const DeckWarehouseFactor = 12.605 // PassingEfficiency is a percentage of the theoretical maximum tpmC required