diff --git a/go.mod b/go.mod
index 40bebdf900f..430fa054a5a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,10 +1,12 @@
 module github.com/opencontainers/runc

-go 1.20
+go 1.21.0
+
+toolchain go1.22.2

 require (
 	github.com/checkpoint-restore/go-criu/v6 v6.3.0
-	github.com/cilium/ebpf v0.12.3
+	github.com/cilium/ebpf v0.13.2
 	github.com/containerd/console v1.0.4
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/cyphar/filepath-securejoin v0.2.4
diff --git a/go.sum b/go.sum
index 956a5945fca..19deb47bdce 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,8 @@
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA=
 github.com/checkpoint-restore/go-criu/v6 v6.3.0/go.mod h1:rrRTN/uSwY2X+BPRl/gkulo9gsKOSAeVp9/K2tv7xZI=
-github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4=
-github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM=
+github.com/cilium/ebpf v0.13.2 h1:uhLimLX+jF9BTPPvoCUYh/mBeoONkjgaJ9w9fn0mRj4=
+github.com/cilium/ebpf v0.13.2/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso=
 github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
 github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -16,16 +16,20 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
 github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
 github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
@@ -38,7 +42,8 @@ github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaL
 github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
deleted file mode 100644
index 26f555eb7a7..00000000000
--- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
+++ /dev/null
@@ -1,92 +0,0 @@
-Architecture of the library
-===
-
-```mermaid
-graph RL
-    Program --> ProgramSpec --> ELF
-    btf.Spec --> ELF
-    Map --> MapSpec --> ELF
-    Links --> Map & Program
-    ProgramSpec -.-> btf.Spec
-    MapSpec -.-> btf.Spec
-    subgraph Collection
-        Program & Map
-    end
-    subgraph CollectionSpec
-        ProgramSpec & MapSpec & btf.Spec
-    end
-```
-
-ELF
----
-
-BPF is usually produced by using Clang to compile a subset of C. Clang outputs
-an ELF file which contains program byte code (aka BPF), but also metadata for
-maps used by the program. The metadata follows the conventions set by libbpf
-shipped with the kernel. Certain ELF sections have special meaning
-and contain structures defined by libbpf. Newer versions of clang emit
-additional metadata in [BPF Type Format](#BTF).
-
-The library aims to be compatible with libbpf so that moving from a C toolchain
-to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
-is tested against the Linux selftests and avoids introducing custom behaviour
-if possible.
-
-The output of the ELF reader is a `CollectionSpec` which encodes
-all of the information contained in the ELF in a form that is easy to work with
-in Go. The returned `CollectionSpec` should be deterministic: reading the same ELF
-file on different systems must produce the same output.
-As a corollary, any changes that depend on the runtime environment like the
-current kernel version must happen when creating [Objects](#Objects).
-
-Specifications
----
-
-`CollectionSpec` is a very simple container for `ProgramSpec`, `MapSpec` and
-`btf.Spec`. Avoid adding functionality to it if possible.
-
-`ProgramSpec` and `MapSpec` are blueprints for in-kernel
-objects and contain everything necessary to execute the relevant `bpf(2)`
-syscalls. They refer to `btf.Spec` for type information such as `Map` key and
-value types.
-
-The [asm](asm/) package provides an assembler that can be used to generate
-`ProgramSpec` on the fly.
-
-Objects
----
-
-`Program` and `Map` are the result of loading specifications into the kernel.
-Features that depend on knowledge of the current system (e.g kernel version)
-are implemented at this point.
-
-Sometimes loading a spec will fail because the kernel is too old, or a feature is not
-enabled. There are multiple ways the library deals with that:
-
-* Fallback: older kernels don't allow naming programs and maps. The library
-  automatically detects support for names, and omits them during load if
-  necessary. This works since name is primarily a debug aid.
-
-* Sentinel error: sometimes it's possible to detect that a feature isn't available.
-  In that case the library will return an error wrapping `ErrNotSupported`.
-  This is also useful to skip tests that can't run on the current kernel.
-
-Once program and map objects are loaded they expose the kernel's low-level API,
-e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
-wrappers on top of the low-level API, like `MapIterator`. The low-level API is
-useful when our higher-level API doesn't support a particular use case.
-
-Links
----
-
-Programs can be attached to many different points in the kernel and newer BPF hooks
-tend to use bpf_link to do so. Older hooks unfortunately use a combination of
-syscalls, netlink messages, etc. Adding support for a new link type should not
-pull in large dependencies like netlink, so XDP programs or tracepoints are
-out of scope.
-
-Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds
-to BPF_LINK_TRACING. In general, these types should be unexported as long as they
-don't export methods outside of the Link interface. Each Go type may have multiple
-exported constructors. For example `AttachTracing` and `AttachLSM` create a
-tracing link, but are distinct functions since they may require different arguments.
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
deleted file mode 100644
index bf57da93953..00000000000
--- a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# How to contribute
-
-Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
-the form of pull requests and issues reporting bugs or suggesting new features
-are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
-a better understanding for the high-level goals.
-
-## Adding a new feature
-
-1. [Join](https://ebpf.io/slack) the
-[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel to discuss your requirements and how the feature can be implemented. The most important part is figuring out how much new exported API is necessary. **The less new API is required the easier it will be to land the feature.**
-2. (*optional*) Create a draft PR if you want to discuss the implementation or have hit a problem. It's fine if this doesn't compile or contains debug statements.
-3. Create a PR that is ready to merge. This must pass CI and have tests.
-
-### API stability
-
-The library doesn't guarantee the stability of its API at the moment.
-
-1. If possible avoid breakage by introducing new API and deprecating the old one
-   at the same time. If an API was deprecated in v0.x it can be removed in v0.x+1.
-2. Breaking API in a way that causes compilation failures is acceptable but must
-   have good reasons.
-3. Changing the semantics of the API without causing compilation failures is
-   heavily discouraged.
-
-## Running the tests
-
-Many of the tests require privileges to set resource limits and load eBPF code.
-The easiest way to obtain these is to run the tests with `sudo`.
-
-To test the current package with your local kernel you can simply run:
-```
-go test -exec sudo ./...
-```
-
-To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
-It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.
-
-Examples:
-
-```bash
-# Run all tests on a 5.4 kernel
-./run-tests.sh 5.4
-
-# Run a subset of tests:
-./run-tests.sh 5.4 ./link
-```
-
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
index 0fa8cdc521c..3727a6e11ea 100644
--- a/vendor/github.com/cilium/ebpf/Makefile
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -47,6 +47,7 @@ TARGETS := \
 	btf/testdata/relocs \
 	btf/testdata/relocs_read \
 	btf/testdata/relocs_read_tgt \
+	btf/testdata/relocs_enum \
 	cmd/bpf2go/testdata/minimal

 .PHONY: all clean container-all container-shell generate
@@ -84,7 +85,8 @@ all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) gene
 	ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf

 generate:
-	go generate ./...
+	go generate -run "internal/cmd/gentypes" ./...
+	go generate -skip "internal/cmd/gentypes" ./...

 testdata/loader-%-el.elf: testdata/loader.c
 	$* $(CFLAGS) -target bpfel -c $< -o $@
@@ -102,13 +104,8 @@ testdata/loader-%-eb.elf: testdata/loader.c
 	$(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@
 	$(STRIP) -g $@

-.PHONY: generate-btf
-generate-btf: KERNEL_VERSION?=6.1.29
-generate-btf:
-	$(eval TMP := $(shell mktemp -d))
-	curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-amd64.tgz" -o "$(TMP)/linux.tgz"
-	tar xvf "$(TMP)/linux.tgz" -C "$(TMP)" --strip-components=2 ./boot/vmlinuz ./lib/modules
-	/lib/modules/$(shell uname -r)/build/scripts/extract-vmlinux "$(TMP)/vmlinuz" > "$(TMP)/vmlinux"
-	$(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz"
-	find "$(TMP)/modules" -type f -name bpf_testmod.ko -exec $(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" {} /dev/null \;
-	$(RM) -r "$(TMP)"
+.PHONY: update-kernel-deps
+update-kernel-deps: export KERNEL_VERSION?=6.7
+update-kernel-deps:
+	./testdata/sh/update-kernel-deps.sh
+	$(MAKE) container-all
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
index 81235a69dd3..b36f8c0bec2 100644
--- a/vendor/github.com/cilium/ebpf/README.md
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -13,10 +13,9 @@ ecosystem.

 ## Getting Started

-A small collection of Go and eBPF programs that serve as examples for building
-your own tools can be found under [examples/](examples/).
+Please take a look at our [Getting Started] guide.

-[Contributions](CONTRIBUTING.md) are highly encouraged, as they highlight certain use cases of
+[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of
 eBPF and the library, and help shape the future of the project.

 ## Getting Help
@@ -62,17 +61,6 @@ This library includes the following packages:
 * Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is not
   tested against.

-## Regenerating Testdata
-
-Run `make` in the root of this repository to rebuild testdata in all
-subpackages. This requires Docker, as it relies on a standardized build
-environment to keep the build output stable.
-
-It is possible to regenerate data using Podman by overriding the `CONTAINER_*`
-variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`.
-
-The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
-
 ## License

 MIT
@@ -80,3 +68,5 @@ MIT
 ### eBPF Gopher

 The eBPF honeygopher is based on the Go gopher designed by Renee French.
+
+[Getting Started]: https://ebpf-go.dev/guides/getting-started/
diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go
index add2a3b5cc9..7e3caed328d 100644
--- a/vendor/github.com/cilium/ebpf/attachtype_string.go
+++ b/vendor/github.com/cilium/ebpf/attachtype_string.go
@@ -52,11 +52,17 @@ func _() {
 	_ = x[AttachSkReuseportSelectOrMigrate-40]
 	_ = x[AttachPerfEvent-41]
 	_ = x[AttachTraceKprobeMulti-42]
+	_ = x[AttachLSMCgroup-43]
+	_ = x[AttachStructOps-44]
+	_ = x[AttachNetfilter-45]
+	_ = x[AttachTCXIngress-46]
+	_ = x[AttachTCXEgress-47]
+	_ = x[AttachTraceUprobeMulti-48]
 }

-const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMulti"
+const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMulti"

-var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626}
+var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688}

 func (i AttachType) String() string {
 	if i >= AttachType(len(_AttachType_index)-1) {
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
index 80f64d78aee..74a17a9e1f5 100644
--- a/vendor/github.com/cilium/ebpf/btf/btf.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -29,9 +29,8 @@ var (
 // ID represents the unique ID of a BTF object.
 type ID = sys.BTFID

-// Spec allows querying a set of Types and loading the set into the
-// kernel.
-type Spec struct {
+// immutableTypes is a set of types which mustn't be changed.
+type immutableTypes struct {
 	// All types contained by the spec, not including types from the base in
 	// case the spec was parsed from split BTF.
 	types []Type
@@ -44,13 +43,155 @@ type Spec struct {
 	// Types indexed by essential name.
 	// Includes all struct flavors and types with the same name.
-	namedTypes map[essentialName][]Type
+	namedTypes map[essentialName][]TypeID
+
+	// Byte order of the types. This affects things like struct member order
+	// when using bitfields.
+	byteOrder binary.ByteOrder
+}
+
+func (s *immutableTypes) typeByID(id TypeID) (Type, bool) {
+	if id < s.firstTypeID {
+		return nil, false
+	}
+
+	index := int(id - s.firstTypeID)
+	if index >= len(s.types) {
+		return nil, false
+	}
+
+	return s.types[index], true
+}
+
+// mutableTypes is a set of types which may be changed.
+type mutableTypes struct {
+	imm           immutableTypes
+	mu            *sync.RWMutex   // protects copies below
+	copies        map[Type]Type   // map[orig]copy
+	copiedTypeIDs map[Type]TypeID // map[copy]origID
+}
+
+// add a type to the set of mutable types.
+//
+// Copies type and all of its children once. Repeated calls with the same type
+// do not copy again.
+func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type {
+	mt.mu.RLock()
+	cpy, ok := mt.copies[typ]
+	mt.mu.RUnlock()
+
+	if ok {
+		// Fast path: the type has been copied before.
+		return cpy
+	}
+
+	// modifyGraphPreorder copies the type graph node by node, so we can't drop
+	// the lock in between.
+	mt.mu.Lock()
+	defer mt.mu.Unlock()
+
+	return modifyGraphPreorder(typ, func(t Type) (Type, bool) {
+		cpy, ok := mt.copies[t]
+		if ok {
+			// This has been copied previously, no need to continue.
+			return cpy, false
+		}
+
+		cpy = t.copy()
+		mt.copies[t] = cpy
+
+		if id, ok := typeIDs[t]; ok {
+			mt.copiedTypeIDs[cpy] = id
+		}
+
+		// This is a new copy, keep copying children.
+		return cpy, true
+	})
+}
+
+// copy a set of mutable types.
+func (mt *mutableTypes) copy() mutableTypes {
+	mtCopy := mutableTypes{
+		mt.imm,
+		&sync.RWMutex{},
+		make(map[Type]Type, len(mt.copies)),
+		make(map[Type]TypeID, len(mt.copiedTypeIDs)),
+	}
+
+	// Prevent concurrent modification of mt.copiedTypeIDs.
+	mt.mu.RLock()
+	defer mt.mu.RUnlock()
+
+	copies := make(map[Type]Type, len(mt.copies))
+	for orig, copy := range mt.copies {
+		// NB: We make a copy of copy, not orig, so that changes to mutable types
+		// are preserved.
+		copyOfCopy := mtCopy.add(copy, mt.copiedTypeIDs)
+		copies[orig] = copyOfCopy
+	}
+
+	// mtCopy.copies is currently map[copy]copyOfCopy, replace it with
+	// map[orig]copyOfCopy.
+	mtCopy.copies = copies
+	return mtCopy
+}
+
+func (mt *mutableTypes) typeID(typ Type) (TypeID, error) {
+	if _, ok := typ.(*Void); ok {
+		// Equality is weird for void, since it is a zero sized type.
+		return 0, nil
+	}
+
+	mt.mu.RLock()
+	defer mt.mu.RUnlock()
+
+	id, ok := mt.copiedTypeIDs[typ]
+	if !ok {
+		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+	}
+
+	return id, nil
+}
+
+func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) {
+	immT, ok := mt.imm.typeByID(id)
+	if !ok {
+		return nil, false
+	}
+
+	return mt.add(immT, mt.imm.typeIDs), true
+}
+
+func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) {
+	immTypes := mt.imm.namedTypes[newEssentialName(name)]
+	if len(immTypes) == 0 {
+		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+	}
+
+	// Return a copy to prevent changes to namedTypes.
+	result := make([]Type, 0, len(immTypes))
+	for _, id := range immTypes {
+		immT, ok := mt.imm.typeByID(id)
+		if !ok {
+			return nil, fmt.Errorf("no type with ID %d", id)
+		}
+
+		// Match against the full name, not just the essential one
+		// in case the type being looked up is a struct flavor.
+		if immT.TypeName() == name {
+			result = append(result, mt.add(immT, mt.imm.typeIDs))
+		}
+	}
+	return result, nil
+}
+
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
+type Spec struct {
+	mutableTypes

 	// String table from ELF.
 	strings *stringTable
-
-	// Byte order of the ELF we decoded the spec from, may be nil.
-	byteOrder binary.ByteOrder
 }

 // LoadSpec opens file and calls LoadSpecFromReader on it.
@@ -181,7 +322,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
 		return nil, err
 	}

-	err = fixupDatasec(spec.types, sectionSizes, offsets)
+	err = fixupDatasec(spec.imm.types, sectionSizes, offsets)
 	if err != nil {
 		return nil, err
 	}
@@ -197,7 +338,7 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error
 	)

 	if base != nil {
-		if base.firstTypeID != 0 {
+		if base.imm.firstTypeID != 0 {
 			return nil, fmt.Errorf("can't use split BTF as base")
 		}

@@ -217,16 +358,23 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error
 	typeIDs, typesByName := indexTypes(types, firstTypeID)

 	return &Spec{
-		namedTypes:  typesByName,
-		typeIDs:     typeIDs,
-		types:       types,
-		firstTypeID: firstTypeID,
-		strings:     rawStrings,
-		byteOrder:   bo,
+		mutableTypes{
+			immutableTypes{
+				types,
+				typeIDs,
+				firstTypeID,
+				typesByName,
+				bo,
+			},
+			&sync.RWMutex{},
+			make(map[Type]Type),
+			make(map[Type]TypeID),
+		},
+		rawStrings,
 	}, nil
 }

-func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) {
 	namedTypes := 0
 	for _, typ := range types {
 		if typ.TypeName() != "" {
@@ -238,13 +386,15 @@ func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentia
 	}

 	typeIDs := make(map[Type]TypeID, len(types))
-	typesByName := make(map[essentialName][]Type, namedTypes)
+	typesByName := make(map[essentialName][]TypeID, namedTypes)

 	for i, typ := range types {
+		id := firstTypeID + TypeID(i)
+		typeIDs[typ] = id
+
 		if name := newEssentialName(typ.TypeName()); name != "" {
-			typesByName[name] = append(typesByName[name], typ)
+			typesByName[name] = append(typesByName[name], id)
 		}
-		typeIDs[typ] = firstTypeID + TypeID(i)
 	}

 	return typeIDs, typesByName
@@ -492,17 +642,9 @@ func fixupDatasecLayout(ds *Datasec) error {

 // Copy creates a copy of Spec.
 func (s *Spec) Copy() *Spec {
-	types := copyTypes(s.types, nil)
-	typeIDs, typesByName := indexTypes(types, s.firstTypeID)
-
-	// NB: Other parts of spec are not copied since they are immutable.
 	return &Spec{
-		types,
-		typeIDs,
-		s.firstTypeID,
-		typesByName,
+		s.mutableTypes.copy(),
 		s.strings,
-		s.byteOrder,
 	}
 }

@@ -519,8 +661,8 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
 // nextTypeID returns the next unallocated type ID or an error if there are no
 // more type IDs.
 func (s *Spec) nextTypeID() (TypeID, error) {
-	id := s.firstTypeID + TypeID(len(s.types))
-	if id < s.firstTypeID {
+	id := s.imm.firstTypeID + TypeID(len(s.imm.types))
+	if id < s.imm.firstTypeID {
 		return 0, fmt.Errorf("no more type IDs")
 	}
 	return id, nil
@@ -531,33 +673,19 @@ func (s *Spec) nextTypeID() (TypeID, error) {
 // Returns an error wrapping ErrNotFound if a Type with the given ID
 // does not exist in the Spec.
 func (s *Spec) TypeByID(id TypeID) (Type, error) {
-	if id < s.firstTypeID {
-		return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
-	}
-
-	index := int(id - s.firstTypeID)
-	if index >= len(s.types) {
-		return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
+	typ, ok := s.typeByID(id)
+	if !ok {
+		return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound)
 	}

-	return s.types[index], nil
+	return typ, nil
 }

 // TypeID returns the ID for a given Type.
 //
 // Returns an error wrapping ErrNoFound if the type isn't part of the Spec.
 func (s *Spec) TypeID(typ Type) (TypeID, error) {
-	if _, ok := typ.(*Void); ok {
-		// Equality is weird for void, since it is a zero sized type.
-		return 0, nil
-	}
-
-	id, ok := s.typeIDs[typ]
-	if !ok {
-		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
-	}
-
-	return id, nil
+	return s.mutableTypes.typeID(typ)
 }

 // AnyTypesByName returns a list of BTF Types with the given name.
@@ -568,21 +696,7 @@ func (s *Spec) TypeID(typ Type) (TypeID, error) {
 //
 // Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
 func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
-	types := s.namedTypes[newEssentialName(name)]
-	if len(types) == 0 {
-		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
-	}
-
-	// Return a copy to prevent changes to namedTypes.
-	result := make([]Type, 0, len(types))
-	for _, t := range types {
-		// Match against the full name, not just the essential one
-		// in case the type being looked up is a struct flavor.
-		if t.TypeName() == name {
-			result = append(result, t)
-		}
-	}
-	return result, nil
+	return s.mutableTypes.anyTypesByName(name)
 }

 // AnyTypeByName returns a Type with the given name.
@@ -671,26 +785,27 @@ func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {

 // TypesIterator iterates over types of a given spec.
 type TypesIterator struct {
-	types []Type
-	index int
+	spec *Spec
+	id   TypeID
+	done bool

 	// The last visited type in the spec.
 	Type Type
 }

 // Iterate returns the types iterator.
 func (s *Spec) Iterate() *TypesIterator {
-	// We share the backing array of types with the Spec. This is safe since
-	// we don't allow deletion or shuffling of types.
-	return &TypesIterator{types: s.types, index: 0}
+	return &TypesIterator{spec: s, id: s.imm.firstTypeID}
 }

 // Next returns true as long as there are any remaining types.
 func (iter *TypesIterator) Next() bool {
-	if len(iter.types) <= iter.index {
+	if iter.done {
 		return false
 	}

-	iter.Type = iter.types[iter.index]
-	iter.index++
-	return true
+	var ok bool
+	iter.Type, ok = iter.spec.typeByID(iter.id)
+	iter.id++
+	iter.done = !ok
+	return !iter.done
 }
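Reviewer note: the `btf.Spec` rework above splits the type index into an immutable backing store plus per-`Spec` copy-on-access state, so lookups now hand out copies that are private to the `Spec` and can't corrupt the process-wide kernel spec cache. A minimal sketch of the consumer-visible API; `btf.LoadKernelSpec` is part of the library but not shown in this diff, so treat its use here as an assumption:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// LoadKernelSpec is cached internally (assumption: signature unchanged by
	// this bump). With the copy-on-access changes above, types we retrieve
	// below are copies owned by this Spec.
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		panic(err)
	}

	typ, err := spec.AnyTypeByName("task_struct")
	if err != nil {
		panic(err)
	}

	// TypeID resolves the copy back to its original ID via copiedTypeIDs.
	id, err := spec.TypeID(typ)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s has type ID %d\n", typ.TypeName(), id)
}
```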
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
index ded7d43d482..e26383ba31f 100644
--- a/vendor/github.com/cilium/ebpf/btf/core.go
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -43,7 +43,15 @@ func (f *COREFixup) Apply(ins *asm.Instruction) error {
 	if f.poison {
 		const badRelo = 0xbad2310

-		*ins = asm.BuiltinFunc(badRelo).Call()
+		// Relocation is poisoned, replace the instruction with an invalid one.
+
+		if ins.OpCode.IsDWordLoad() {
+			// Replace a dword load with an invalid dword load to preserve instruction size.
+			*ins = asm.LoadImm(asm.R10, badRelo, asm.DWord)
+		} else {
+			// Replace all single-size instructions with an invalid call instruction.
+			*ins = asm.BuiltinFunc(badRelo).Call()
+		}

 		return nil
 	}
@@ -159,12 +167,17 @@ func (k coreKind) String() string {
 // CORERelocate calculates changes needed to adjust eBPF instructions for differences
 // in types.
 //
+// resolveLocalTypeID is called for each local type which requires a stable TypeID.
+// Calling the function with the same type multiple times must produce the same
+// result. It is the caller's responsibility to ensure that the relocated instructions
+// are loaded with matching BTF.
+//
 // Returns a list of fixups which can be applied to instructions to make them
 // match the target type(s).
 //
 // Fixups are returned in the order of relos, e.g. fixup[i] is the solution
 // for relos[i].
-func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
+func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
 	if target == nil {
 		var err error
 		target, _, err = kernelSpec()
@@ -173,8 +186,8 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([
 		}
 	}

-	if bo != target.byteOrder {
-		return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
+	if bo != target.imm.byteOrder {
+		return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
 	}

 	type reloGroup struct {
@@ -194,14 +207,15 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([
 			return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
 		}

+		id, err := resolveLocalTypeID(relo.typ)
+		if err != nil {
+			return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err)
+		}
+
 		result[i] = COREFixup{
-			kind:  relo.kind,
-			local: uint64(relo.id),
-			// NB: Using relo.id as the target here is incorrect, since
-			// it doesn't match the BTF we generate on the fly. This isn't
-			// too bad for now since there are no uses of the local type ID
-			// in the kernel, yet.
-			target: uint64(relo.id),
+			kind:   relo.kind,
+			local:  uint64(relo.id),
+			target: uint64(id),
 		}
 		continue
 	}
@@ -221,7 +235,7 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([
 			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
 		}

-		targets := target.namedTypes[newEssentialName(localTypeName)]
+		targets := target.imm.namedTypes[newEssentialName(localTypeName)]
 		fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
 		if err != nil {
 			return nil, fmt.Errorf("relocate %s: %w", localType, err)
@@ -245,13 +259,13 @@ var errIncompatibleTypes = errors.New("incompatible types")
 //
 // The best target is determined by scoring: the less poisoning we have to do
 // the better the target is.
-func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
+func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []TypeID, bo binary.ByteOrder) ([]COREFixup, error) {
 	bestScore := len(relos)
 	var bestFixups []COREFixup
-	for _, target := range targets {
-		targetID, err := targetSpec.TypeID(target)
+	for _, targetID := range targets {
+		target, err := targetSpec.TypeByID(targetID)
 		if err != nil {
-			return nil, fmt.Errorf("target type ID: %w", err)
+			return nil, fmt.Errorf("look up target: %w", err)
 		}

 		score := 0 // lower is better
@@ -903,7 +917,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
 		targetType = UnderlyingType(*t)

 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
-			return fmt.Errorf("type mismatch: %w", errIncompatibleTypes)
+			return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
 		}

 		switch lv := (localType).(type) {
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
index d85f45ae9d8..d5652bad512 100644
--- a/vendor/github.com/cilium/ebpf/btf/ext_info.go
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -142,15 +142,7 @@ func AssignMetadataToInstructions(

 // MarshalExtInfos encodes function and line info embedded in insns into kernel
 // wire format.
-//
-// Returns ErrNotSupported if the kernel doesn't support BTF-associated programs.
-func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []byte, _ error) {
-	// Bail out early if the kernel doesn't support Func(Proto). If this is the
-	// case, func_info will also be unsupported.
-	if err := haveProgBTF(); err != nil {
-		return nil, nil, nil, err
-	}
-
+func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) {
 	iter := insns.Iterate()
 	for iter.Next() {
 		_, ok := iter.Ins.Source().(*Line)
@@ -160,10 +152,9 @@ func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []
 		}
 	}

-	return nil, nil, nil, nil
+	return nil, nil, nil

 marshal:
-	var b Builder
 	var fiBuf, liBuf bytes.Buffer
 	for {
 		if fn := FuncMetadata(iter.Ins); fn != nil {
@@ -171,8 +162,8 @@ marshal:
 				fn:     fn,
 				offset: iter.Offset,
 			}
-			if err := fi.marshal(&fiBuf, &b); err != nil {
-				return nil, nil, nil, fmt.Errorf("write func info: %w", err)
+			if err := fi.marshal(&fiBuf, b); err != nil {
+				return nil, nil, fmt.Errorf("write func info: %w", err)
 			}
 		}

@@ -181,8 +172,8 @@ marshal:
 				line:   line,
 				offset: iter.Offset,
 			}
-			if err := li.marshal(&liBuf, &b); err != nil {
-				return nil, nil, nil, fmt.Errorf("write line info: %w", err)
+			if err := li.marshal(&liBuf, b); err != nil {
+				return nil, nil, fmt.Errorf("write line info: %w", err)
 			}
 		}

@@ -191,8 +182,7 @@ marshal:
 		}
 	}

-	handle, err := NewHandle(&b)
-	return handle, fiBuf.Bytes(), liBuf.Bytes(), err
+	return fiBuf.Bytes(), liBuf.Bytes(), nil
 }

 // btfExtHeader is found at the start of the .BTF.ext section.
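Reviewer note: `MarshalExtInfos` no longer probes kernel support or creates a BTF handle itself; the caller now owns the `Builder` and decides whether and when to load it. A hedged sketch of the new calling convention, pieced together from the diff above (`Builder.Empty` comes from the marshal.go hunk below, `NewHandle(&b)` from the removed code; the helper name is hypothetical):

```go
package loader

import (
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/btf"
)

// marshalExtInfos is a hypothetical helper: the caller owns the Builder and
// only loads a Handle into the kernel if ext infos were actually collected.
func marshalExtInfos(insns asm.Instructions) (*btf.Handle, []byte, []byte, error) {
	var b btf.Builder
	funcInfos, lineInfos, err := btf.MarshalExtInfos(insns, &b)
	if err != nil {
		return nil, nil, nil, err
	}

	if b.Empty() {
		// No function or line info present; nothing to load.
		return nil, nil, nil, nil
	}

	handle, err := btf.NewHandle(&b)
	return handle, funcInfos, lineInfos, err
}
```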
diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go
index 0d093c66564..744c84c4c0d 100644
--- a/vendor/github.com/cilium/ebpf/btf/marshal.go
+++ b/vendor/github.com/cilium/ebpf/btf/marshal.go
@@ -6,11 +6,10 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"slices"
 	"sync"

 	"github.com/cilium/ebpf/internal"
-
-	"golang.org/x/exp/slices"
 )

 type MarshalOptions struct {
@@ -93,6 +92,11 @@ func NewBuilder(types []Type) (*Builder, error) {
 	return b, nil
 }

+// Empty returns true if [Add] has not been invoked on the builder.
+func (b *Builder) Empty() bool {
+	return len(b.types) == 0
+}
+
 // Add a Type and allocate a stable ID for it.
 //
 // Adding the identical Type multiple times is valid and will return the same ID.
diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go
index 114d250018b..7c31461c306 100644
--- a/vendor/github.com/cilium/ebpf/btf/strings.go
+++ b/vendor/github.com/cilium/ebpf/btf/strings.go
@@ -6,10 +6,9 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
+	"slices"
 	"strings"
-
-	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 )

 type stringTable struct {
diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go
index a3a9dec940a..5a7387b0638 100644
--- a/vendor/github.com/cilium/ebpf/btf/traversal.go
+++ b/vendor/github.com/cilium/ebpf/btf/traversal.go
@@ -87,6 +87,43 @@ func (po *postorderIterator) Next() bool {
 	return po.Type != nil
 }

+// modifyGraphPreorder allows modifying every Type in a graph.
+//
+// fn is invoked in preorder for every unique Type in a graph. See [Type] for the definition
+// of equality. Every occurrence of node is substituted with its replacement.
+//
+// If cont is true, fn is invoked for every child of replacement. Otherwise
+// traversal stops.
+//
+// Returns the substitution of the root node.
+func modifyGraphPreorder(root Type, fn func(node Type) (replacement Type, cont bool)) Type {
+	sub, cont := fn(root)
+	replacements := map[Type]Type{root: sub}
+
+	// This is a preorder traversal.
+	var walk func(*Type)
+	walk = func(node *Type) {
+		sub, visited := replacements[*node]
+		if visited {
+			*node = sub
+			return
+		}
+
+		sub, cont := fn(*node)
+		replacements[*node] = sub
+		*node = sub
+
+		if cont {
+			walkType(*node, walk)
+		}
+	}
+
+	if cont {
+		walkType(sub, walk)
+	}
+	return sub
+}
+
 // walkType calls fn on each child of typ.
 func walkType(typ Type, fn func(*Type)) {
 	// Explicitly type switch on the most common types to allow the inliner to
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
index 8bd7fc77fc6..1e57a9791fe 100644
--- a/vendor/github.com/cilium/ebpf/btf/types.go
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -6,12 +6,12 @@ import (
 	"fmt"
 	"io"
 	"math"
+	"slices"
 	"strings"

 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/internal"
 	"github.com/cilium/ebpf/internal/sys"
-	"golang.org/x/exp/slices"
 )

 // Mirrors MAX_RESOLVE_DEPTH in libbpf.
@@ -678,52 +678,31 @@ type Transformer func(Type) Type
 // typ may form a cycle. If transform is not nil, it is called with the
 // to be copied type, and the returned value is copied instead.
 func Copy(typ Type, transform Transformer) Type {
-	copies := copier{copies: make(map[Type]Type)}
-	copies.copy(&typ, transform)
-	return typ
+	copies := make(copier)
+	return copies.copy(typ, transform)
 }

-// copy a slice of Types recursively.
-//
-// See Copy for the semantics.
-func copyTypes(types []Type, transform Transformer) []Type {
-	result := make([]Type, len(types))
-	copy(result, types)
-
-	copies := copier{copies: make(map[Type]Type, len(types))}
-	for i := range result {
-		copies.copy(&result[i], transform)
-	}
-
-	return result
-}
-
-type copier struct {
-	copies map[Type]Type
-	work   typeDeque
-}
+// A map of a type to its copy.
+type copier map[Type]Type

-func (c *copier) copy(typ *Type, transform Transformer) {
-	for t := typ; t != nil; t = c.work.Pop() {
-		// *t is the identity of the type.
-		if cpy := c.copies[*t]; cpy != nil {
-			*t = cpy
-			continue
+func (c copier) copy(typ Type, transform Transformer) Type {
+	return modifyGraphPreorder(typ, func(t Type) (Type, bool) {
+		cpy, ok := c[t]
+		if ok {
+			// This has been copied previously, no need to continue.
+			return cpy, false
 		}

-		var cpy Type
 		if transform != nil {
-			cpy = transform(*t).copy()
+			cpy = transform(t).copy()
 		} else {
-			cpy = (*t).copy()
+			cpy = t.copy()
 		}
+		c[t] = cpy

-		c.copies[*t] = cpy
-		*t = cpy
-
-		// Mark any nested types for copying.
-		walkType(cpy, c.work.Push)
-	}
+		// This is a new copy, keep copying children.
+		return cpy, true
+	})
 }

 type typeDeque = internal.Deque[*Type]
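Reviewer note: the exported `btf.Copy(typ Type, transform Transformer)` (visible as context above) now funnels through `modifyGraphPreorder`: the transformer is invoked once per unique type, and its return value is what gets copied. A small sketch of that behaviour; the `btf.Struct`/`btf.Member`/`btf.Int` field shapes are from the library but not shown in this diff, so treat them as assumptions:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	orig := &btf.Struct{
		Name: "foo",
		Size: 4,
		Members: []btf.Member{
			{Name: "bar", Type: &btf.Int{Name: "int", Size: 4}},
		},
	}

	// Copy deep-copies the type graph; the original is left untouched.
	// The transformer renames every struct it encounters on the way.
	cpy := btf.Copy(orig, func(t btf.Type) btf.Type {
		if s, ok := t.(*btf.Struct); ok {
			renamed := *s
			renamed.Name += "_copy"
			return &renamed
		}
		return t
	})

	fmt.Println(orig.Name, cpy.(*btf.Struct).Name) // foo foo_copy
}
```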
diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go
index 5a85cbc24ff..e409e97f9fc 100644
--- a/vendor/github.com/cilium/ebpf/elf_reader.go
+++ b/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -15,6 +15,7 @@ import (
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/btf"
 	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
 	"github.com/cilium/ebpf/internal/unix"
 )

@@ -456,6 +457,8 @@ func jumpTarget(offset uint64, ins asm.Instruction) uint64 {
 	return uint64(dest)
 }

+var errUnsupportedBinding = errors.New("unsupported binding")
+
 func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
 	var (
 		typ  = elf.ST_TYPE(rel.Info)
@@ -472,7 +475,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		}

 		if bind != elf.STB_GLOBAL {
-			return fmt.Errorf("map %q: unsupported relocation %s", name, bind)
+			return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind)
 		}

 		if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
@@ -488,7 +491,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		switch typ {
 		case elf.STT_SECTION:
 			if bind != elf.STB_LOCAL {
-				return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind)
+				return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}

 			// This is really a reference to a static symbol, which clang doesn't
@@ -499,7 +502,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		case elf.STT_OBJECT:
 			// LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants.
 			if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL {
-				return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind)
+				return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}

 			offset = uint32(rel.Value)
@@ -507,7 +510,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		case elf.STT_NOTYPE:
 			// LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants.
 			if bind != elf.STB_LOCAL {
-				return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind)
+				return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}

 			offset = uint32(rel.Value)
@@ -535,12 +538,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		switch typ {
 		case elf.STT_NOTYPE, elf.STT_FUNC:
 			if bind != elf.STB_GLOBAL {
-				return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+				return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}

 		case elf.STT_SECTION:
 			if bind != elf.STB_LOCAL {
-				return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+				return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}

 			// The function we want to call is in the indicated section,
@@ -563,12 +566,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		switch typ {
 		case elf.STT_FUNC:
 			if bind != elf.STB_GLOBAL {
-				return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+				return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}

 		case elf.STT_SECTION:
 			if bind != elf.STB_LOCAL {
-				return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+				return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}

 			// ins.Constant already contains the offset in bytes from the
@@ -598,7 +601,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 	// and extern kconfig variables declared using __kconfig.
 	case undefSection:
 		if bind != elf.STB_GLOBAL {
-			return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
+			return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind)
 		}

 		if typ != elf.STT_NOTYPE {
@@ -1181,109 +1184,106 @@ func (ec *elfCode) loadKsymsSection() error {
 	return nil
 }

+type libbpfElfSectionDef struct {
+	pattern     string
+	programType sys.ProgType
+	attachType  sys.AttachType
+	flags       libbpfElfSectionFlag
+}
+
+type libbpfElfSectionFlag uint32
+
+// The values correspond to enum sec_def_flags in libbpf.
+const (
+	_SEC_NONE libbpfElfSectionFlag = 0
+
+	_SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1)
+	_SEC_ATTACHABLE
+	_SEC_ATTACH_BTF
+	_SEC_SLEEPABLE
+	_SEC_XDP_FRAGS
+	_SEC_USDT
+
+	// Ignore any present extra in order to preserve backwards compatibility
+	// with earlier versions of the library.
+	ignoreExtra
+
+	_SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT
+)
+
+func init() {
+	// Compatibility with older versions of the library.
+	// We prepend libbpf definitions since they contain a prefix match
+	// for "xdp".
+	elfSectionDefs = append([]libbpfElfSectionDef{
+		{"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra},
+		{"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
+		{"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0},
+		{"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
+		{"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0},
+		// This has been in the library since the beginning of time. Not sure
+		// where it came from.
+		{"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
+	}, elfSectionDefs...)
+}
+
 func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
-	types := []struct {
-		prefix     string
-		progType   ProgramType
-		attachType AttachType
-		progFlags  uint32
-	}{
-		// Please update the types from libbpf.c and follow the order of it.
-		// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
-		{"socket", SocketFilter, AttachNone, 0},
-		{"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0},
-		{"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0},
-		{"kprobe/", Kprobe, AttachNone, 0},
-		{"uprobe/", Kprobe, AttachNone, 0},
-		{"kretprobe/", Kprobe, AttachNone, 0},
-		{"uretprobe/", Kprobe, AttachNone, 0},
-		{"tc", SchedCLS, AttachNone, 0},
-		{"classifier", SchedCLS, AttachNone, 0},
-		{"action", SchedACT, AttachNone, 0},
-		{"tracepoint/", TracePoint, AttachNone, 0},
-		{"tp/", TracePoint, AttachNone, 0},
-		{"raw_tracepoint/", RawTracepoint, AttachNone, 0},
-		{"raw_tp/", RawTracepoint, AttachNone, 0},
-		{"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0},
-		{"raw_tp.w/", RawTracepointWritable, AttachNone, 0},
-		{"tp_btf/", Tracing, AttachTraceRawTp, 0},
-		{"fentry/", Tracing, AttachTraceFEntry, 0},
-		{"fmod_ret/", Tracing, AttachModifyReturn, 0},
-		{"fexit/", Tracing, AttachTraceFExit, 0},
-		{"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE},
-		{"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE},
-		{"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE},
-		{"freplace/", Extension, AttachNone, 0},
-		{"lsm/", LSM, AttachLSMMac, 0},
-		{"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE},
-		{"iter/", Tracing, AttachTraceIter, 0},
-		{"iter.s/", Tracing, AttachTraceIter, unix.BPF_F_SLEEPABLE},
-		{"syscall", Syscall, AttachNone, 0},
-		{"xdp.frags_devmap/", XDP, AttachXDPDevMap, unix.BPF_F_XDP_HAS_FRAGS},
-		{"xdp_devmap/", XDP, AttachXDPDevMap, 0},
-		{"xdp.frags_cpumap/", XDP, AttachXDPCPUMap, unix.BPF_F_XDP_HAS_FRAGS},
-		{"xdp_cpumap/", XDP, AttachXDPCPUMap, 0},
-		{"xdp.frags", XDP, AttachNone, unix.BPF_F_XDP_HAS_FRAGS},
-		{"xdp", XDP, AttachNone, 0},
-		{"perf_event", PerfEvent, AttachNone, 0},
-		{"lwt_in", LWTIn, AttachNone, 0},
-		{"lwt_out", LWTOut, AttachNone, 0},
-		{"lwt_xmit", LWTXmit, AttachNone, 0},
-		{"lwt_seg6local", LWTSeg6Local, AttachNone, 0},
-		{"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0},
-		{"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0},
-		{"cgroup/skb", CGroupSKB, AttachNone, 0},
-		{"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0},
-		{"cgroup/sock_release", CGroupSock, AttachCgroupInetSockRelease, 0},
-		{"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0},
-		{"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0},
-		{"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0},
-		{"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0},
-		{"sockops", SockOps, AttachCGroupSockOps, 0},
-		{"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0},
-		{"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0},
-		{"sk_skb", SkSKB, AttachNone, 0},
-		{"sk_msg", SkMsg, AttachSkMsgVerdict, 0},
-		{"lirc_mode2", LircMode2, AttachLircMode2, 0},
-		{"flow_dissector", FlowDissector, AttachFlowDissector, 0},
-		{"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0},
-		{"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0},
-		{"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0},
-		{"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0},
-		{"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0},
-		{"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0},
-		{"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0},
-		{"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0},
-		{"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0},
-		{"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0},
-		{"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0},
-		{"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0},
-		{"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0},
-		{"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0},
-		{"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0},
-		{"struct_ops+", StructOps, AttachNone, 0},
-		{"sk_lookup/", SkLookup, AttachSkLookup, 0},
-		{"seccomp", SocketFilter, AttachNone, 0},
-		{"kprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
-		{"kretprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
-		// Document all prefixes in docs/ebpf/concepts/elf-sections.md.
-	}
+	// Skip optional program marking for now.
+	sectionName = strings.TrimPrefix(sectionName, "?")

-	for _, t := range types {
-		if !strings.HasPrefix(sectionName, t.prefix) {
+	for _, t := range elfSectionDefs {
+		extra, ok := matchSectionName(sectionName, t.pattern)
+		if !ok {
 			continue
 		}

-		if !strings.HasSuffix(t.prefix, "/") {
-			return t.progType, t.attachType, t.progFlags, ""
+		programType := ProgramType(t.programType)
+		attachType := AttachType(t.attachType)
+
+		var flags uint32
+		if t.flags&_SEC_SLEEPABLE > 0 {
+			flags |= unix.BPF_F_SLEEPABLE
+		}
+		if t.flags&_SEC_XDP_FRAGS > 0 {
+			flags |= unix.BPF_F_XDP_HAS_FRAGS
+		}
+		if t.flags&_SEC_EXP_ATTACH_OPT > 0 {
+			if programType == XDP {
+				// The library doesn't yet have code to fall back to not specifying
+				// attach type. Only do this for XDP since we've enforced correct
+				// attach type for all other program types.
+				attachType = AttachNone
+			}
+		}
+		if t.flags&ignoreExtra > 0 {
+			extra = ""
 		}

-		return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):]
+		return programType, attachType, flags, extra
 	}

 	return UnspecifiedProgram, AttachNone, 0, ""
 }

+// matchSectionName checks a section name against a pattern.
+//
+// Its behaviour mirrors that of libbpf's sec_def_matches.
+func matchSectionName(sectionName, pattern string) (extra string, found bool) {
+	have, extra, found := strings.Cut(sectionName, "/")
+	want := strings.TrimRight(pattern, "+/")
+
+	if strings.HasSuffix(pattern, "/") {
+		// Section name must have a slash and extra may be empty.
+		return extra, have == want && found
+	} else if strings.HasSuffix(pattern, "+") {
+		// Section name may have a slash and extra may be empty.
+		return extra, have == want
+	}
+
+	// Section name must have a prefix. extra is ignored.
+	return "", strings.HasPrefix(sectionName, pattern)
+}
+
 func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
 	rels := make(map[uint64]elf.Symbol)
diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go
deleted file mode 100644
index 9e908b610b5..00000000000
--- a/vendor/github.com/cilium/ebpf/internal/cpu.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package internal
-
-import (
-	"fmt"
-	"os"
-	"strings"
-)
-
-// PossibleCPUs returns the max number of CPUs a system may possibly have
-// Logical CPU numbers must be of the form 0-n
-var PossibleCPUs = Memoize(func() (int, error) {
-	return parseCPUsFromFile("/sys/devices/system/cpu/possible")
-})
-
-func parseCPUsFromFile(path string) (int, error) {
-	spec, err := os.ReadFile(path)
-	if err != nil {
-		return 0, err
-	}
-
-	n, err := parseCPUs(string(spec))
-	if err != nil {
-		return 0, fmt.Errorf("can't parse %s: %v", path, err)
-	}
-
-	return n, nil
-}
-
-// parseCPUs parses the number of cpus from a string produced
-// by bitmap_list_string() in the Linux kernel.
-// Multiple ranges are rejected, since they can't be unified
-// into a single number.
-// This is the format of /sys/devices/system/cpu/possible, it
-// is not suitable for /sys/devices/system/cpu/online, etc.
-func parseCPUs(spec string) (int, error) {
-	if strings.Trim(spec, "\n") == "0" {
-		return 1, nil
-	}
-
-	var low, high int
-	n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
-	if n != 2 || err != nil {
-		return 0, fmt.Errorf("invalid format: %s", spec)
-	}
-	if low != 0 {
-		return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
-	}
-
-	// cpus is 0 indexed
-	return high + 1, nil
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
index fa530857824..1921e4f15ad 100644
--- a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
+++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
@@ -263,12 +263,25 @@ func PutInteger(data []byte, integer *btf.Int, n uint64) error {
 		return fmt.Errorf("invalid boolean value: %d", n)
 	}

+	if len(data) < int(integer.Size) {
+		return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data))
+	}
+
 	switch integer.Size {
 	case 1:
+		if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) {
+			return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
+		}
 		data[0] = byte(n)
 	case 2:
+		if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) {
+			return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
+		}
 		internal.NativeEndian.PutUint16(data, uint16(n))
 	case 4:
+		if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) {
+			return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
+		}
 		internal.NativeEndian.PutUint32(data, uint32(n))
 	case 8:
 		internal.NativeEndian.PutUint64(data, uint64(n))
diff --git a/vendor/github.com/cilium/ebpf/internal/memoize.go b/vendor/github.com/cilium/ebpf/internal/memoize.go
deleted file mode 100644
index 3de0a3fb95a..00000000000
--- a/vendor/github.com/cilium/ebpf/internal/memoize.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package internal
-
-import (
-	"sync"
-)
-
-type memoizedFunc[T any] struct {
-	once   sync.Once
-	fn     func() (T, error)
-	result T
-	err    error
-}
-
-func (mf *memoizedFunc[T]) do() (T, error) {
-	mf.once.Do(func() {
-		mf.result, mf.err = mf.fn()
-	})
-	return mf.result, mf.err
-}
-
-// Memoize the result of a function call.
-//
-// fn is only ever called once, even if it returns an error.
-func Memoize[T any](fn func() (T, error)) func() (T, error) {
-	return (&memoizedFunc[T]{fn: fn}).do
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
index c80744ae0e0..d9fe217222b 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
@@ -21,24 +21,28 @@ func _() {
 	_ = x[BPF_F_MMAPABLE-1024]
 	_ = x[BPF_F_PRESERVE_ELEMS-2048]
 	_ = x[BPF_F_INNER_MAP-4096]
+	_ = x[BPF_F_LINK-8192]
+	_ = x[BPF_F_PATH_FD-16384]
 }

-const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAP"
+const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAPBPF_F_LINKBPF_F_PATH_FD"

 var _MapFlags_map = map[MapFlags]string{
-	1:    _MapFlags_name[0:17],
-	2:    _MapFlags_name[17:36],
-	4:    _MapFlags_name[36:51],
-	8:    _MapFlags_name[51:63],
-	16:   _MapFlags_name[63:75],
-	32:   _MapFlags_name[75:95],
-	64:   _MapFlags_name[95:110],
-	128:  _MapFlags_name[110:127],
-	256:  _MapFlags_name[127:144],
-	512:  _MapFlags_name[144:155],
-	1024: _MapFlags_name[155:169],
-	2048: _MapFlags_name[169:189],
-	4096: _MapFlags_name[189:204],
+	1:     _MapFlags_name[0:17],
+	2:     _MapFlags_name[17:36],
+	4:     _MapFlags_name[36:51],
+	8:     _MapFlags_name[51:63],
+	16:    _MapFlags_name[63:75],
+	32:    _MapFlags_name[75:95],
+	64:    _MapFlags_name[95:110],
+	128:   _MapFlags_name[110:127],
+	256:   _MapFlags_name[127:144],
+	512:   _MapFlags_name[144:155],
+	1024:  _MapFlags_name[155:169],
+	2048:  _MapFlags_name[169:189],
+	4096:  _MapFlags_name[189:204],
+	8192:  _MapFlags_name[204:214],
+	16384: _MapFlags_name[214:227],
 }

 func (i MapFlags) String() string {
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
index 088e82eea2a..b1d49b87041 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -139,6 +139,17 @@ const (
 	BPF_F_MMAPABLE
 	BPF_F_PRESERVE_ELEMS
 	BPF_F_INNER_MAP
+	BPF_F_LINK
+	BPF_F_PATH_FD
+)
+
+// Flags used by bpf_mprog.
+const (
+	BPF_F_REPLACE = 1 << (iota + 2)
+	BPF_F_BEFORE
+	BPF_F_AFTER
+	BPF_F_ID
+	BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK
 )

 // wrappedErrno wraps syscall.Errno to prevent direct comparisons with
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go
index 51698e06b47..db7baa0224b 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/types.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go
@@ -65,7 +65,14 @@ const (
 	BPF_TCX_INGRESS                    AttachType = 46
 	BPF_TCX_EGRESS                     AttachType = 47
 	BPF_TRACE_UPROBE_MULTI             AttachType = 48
-	__MAX_BPF_ATTACH_TYPE              AttachType = 49
+	BPF_CGROUP_UNIX_CONNECT            AttachType = 49
+	BPF_CGROUP_UNIX_SENDMSG            AttachType = 50
+	BPF_CGROUP_UNIX_RECVMSG            AttachType = 51
+	BPF_CGROUP_UNIX_GETPEERNAME        AttachType = 52
+	BPF_CGROUP_UNIX_GETSOCKNAME        AttachType = 53
+	BPF_NETKIT_PRIMARY                 AttachType = 54
+	BPF_NETKIT_PEER                    AttachType = 55
+	__MAX_BPF_ATTACH_TYPE              AttachType = 56
 )

 type Cmd uint32
@@ -351,46 +358,48 @@ const (
 	BPF_LINK_TYPE_NETFILTER    LinkType = 10
 	BPF_LINK_TYPE_TCX          LinkType = 11
 	BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12
-	MAX_BPF_LINK_TYPE          LinkType = 13
+	BPF_LINK_TYPE_NETKIT       LinkType = 13
+	MAX_BPF_LINK_TYPE          LinkType = 14
 )

 type MapType uint32

 const (
-	BPF_MAP_TYPE_UNSPEC                    MapType = 0
-	BPF_MAP_TYPE_HASH                      MapType = 1
-	BPF_MAP_TYPE_ARRAY                     MapType = 2
-	BPF_MAP_TYPE_PROG_ARRAY                MapType = 3
-	BPF_MAP_TYPE_PERF_EVENT_ARRAY          MapType = 4
-	BPF_MAP_TYPE_PERCPU_HASH               MapType = 5
-	BPF_MAP_TYPE_PERCPU_ARRAY              MapType = 6
-	BPF_MAP_TYPE_STACK_TRACE               MapType = 7
-	BPF_MAP_TYPE_CGROUP_ARRAY              MapType = 8
-	BPF_MAP_TYPE_LRU_HASH                  MapType = 9
-	BPF_MAP_TYPE_LRU_PERCPU_HASH           MapType = 10
-	BPF_MAP_TYPE_LPM_TRIE                  MapType = 11
-	BPF_MAP_TYPE_ARRAY_OF_MAPS             MapType = 12
-	BPF_MAP_TYPE_HASH_OF_MAPS              MapType = 13
-	BPF_MAP_TYPE_DEVMAP                    MapType = 14
-	BPF_MAP_TYPE_SOCKMAP                   MapType = 15
-	BPF_MAP_TYPE_CPUMAP                    MapType = 16
-	BPF_MAP_TYPE_XSKMAP                    MapType = 17
-	BPF_MAP_TYPE_SOCKHASH                  MapType = 18
-	BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19
-	BPF_MAP_TYPE_CGROUP_STORAGE            MapType = 19
-	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY       MapType = 20
-	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE     MapType = 21
-	BPF_MAP_TYPE_QUEUE                     MapType = 22
-	BPF_MAP_TYPE_STACK                     MapType = 23
-	BPF_MAP_TYPE_SK_STORAGE                MapType = 24
-	BPF_MAP_TYPE_DEVMAP_HASH               MapType = 25
-	BPF_MAP_TYPE_STRUCT_OPS                MapType = 26
-	BPF_MAP_TYPE_RINGBUF                   MapType = 27
-	BPF_MAP_TYPE_INODE_STORAGE             MapType = 28
-	BPF_MAP_TYPE_TASK_STORAGE              MapType = 29
-	BPF_MAP_TYPE_BLOOM_FILTER              MapType = 30
-	BPF_MAP_TYPE_USER_RINGBUF              MapType = 31
-	BPF_MAP_TYPE_CGRP_STORAGE              MapType = 32
+	BPF_MAP_TYPE_UNSPEC                           MapType = 0
+	BPF_MAP_TYPE_HASH                             MapType = 1
+	BPF_MAP_TYPE_ARRAY                            MapType = 2
+	BPF_MAP_TYPE_PROG_ARRAY                       MapType = 3
+	BPF_MAP_TYPE_PERF_EVENT_ARRAY                 MapType = 4
+	BPF_MAP_TYPE_PERCPU_HASH                      MapType = 5
+	BPF_MAP_TYPE_PERCPU_ARRAY                     MapType = 6
+	BPF_MAP_TYPE_STACK_TRACE                      MapType = 7
+	BPF_MAP_TYPE_CGROUP_ARRAY                     MapType = 8
+	BPF_MAP_TYPE_LRU_HASH                         MapType = 9
+	BPF_MAP_TYPE_LRU_PERCPU_HASH                  MapType = 10
+	BPF_MAP_TYPE_LPM_TRIE                         MapType = 11
+	BPF_MAP_TYPE_ARRAY_OF_MAPS                    MapType = 12
+	BPF_MAP_TYPE_HASH_OF_MAPS                     MapType = 13
+	BPF_MAP_TYPE_DEVMAP                           MapType = 14
+	BPF_MAP_TYPE_SOCKMAP                          MapType = 15
+	BPF_MAP_TYPE_CPUMAP                           MapType = 16
+	BPF_MAP_TYPE_XSKMAP                           MapType = 17
+	BPF_MAP_TYPE_SOCKHASH                         MapType = 18
+	BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED        MapType = 19
+	BPF_MAP_TYPE_CGROUP_STORAGE                   MapType = 19
+	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY              MapType = 20
+	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21
+	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE            MapType = 21
+	BPF_MAP_TYPE_QUEUE                            MapType = 22
+	BPF_MAP_TYPE_STACK                            MapType = 23
+	BPF_MAP_TYPE_SK_STORAGE                       MapType = 24
+	BPF_MAP_TYPE_DEVMAP_HASH                      MapType = 25
+	BPF_MAP_TYPE_STRUCT_OPS                       MapType = 26
+	BPF_MAP_TYPE_RINGBUF                          MapType = 27
+	BPF_MAP_TYPE_INODE_STORAGE                    MapType = 28
+	BPF_MAP_TYPE_TASK_STORAGE                     MapType = 29
+	BPF_MAP_TYPE_BLOOM_FILTER                     MapType = 30
+	BPF_MAP_TYPE_USER_RINGBUF                     MapType = 31
+	BPF_MAP_TYPE_CGRP_STORAGE                     MapType = 32
 )

 type ProgType uint32
@@ -462,6 +471,15 @@ const (
 	BPF_STATS_RUN_TIME StatsType = 0
 )

+type TcxActionBase int32
+
+const (
+	TCX_NEXT     TcxActionBase = -1
+	TCX_PASS     TcxActionBase = 0
+	TCX_DROP     TcxActionBase = 2
+	TCX_REDIRECT TcxActionBase = 7
+)
+
 type XdpAction uint32

 const (
@@ -498,7 +516,7 @@ type LinkInfo struct {
 	Id     LinkID
 	ProgId uint32
 	_      [4]byte
-	Extra  [32]uint8
+	Extra  [40]uint8
 }

 type MapInfo struct {
@@ -702,6 +720,26 @@ func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) {
 	return NewFD(int(fd))
 }

+type LinkCreateNetfilterAttr struct {
+	ProgFd         uint32
+	TargetFd       uint32
+	AttachType     AttachType
+	Flags          uint32
+	Pf             uint32
+	Hooknum        uint32
+	Priority       int32
+	NetfilterFlags uint32
+	_              [32]byte
+}
+
+func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) {
+	fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	if err != nil {
+		return nil, err
+	}
+	return NewFD(int(fd))
+}
+
 type LinkCreatePerfEventAttr struct {
 	ProgFd     uint32
 	TargetFd   uint32
@@ -719,6 +757,25 @@ func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
 	return NewFD(int(fd))
 }

+type LinkCreateTcxAttr struct {
+	ProgFd           uint32
+	TargetIfindex    uint32
+	AttachType       AttachType
+	Flags            uint32
+	RelativeFdOrId   uint32
+	_                [4]byte
+	ExpectedRevision uint64
+	_                [32]byte
+}
+
+func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) {
+	fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	if err != nil {
+		return nil, err
+	}
+	return NewFD(int(fd))
+}
+
 type LinkCreateTracingAttr struct {
 	ProgFd     uint32
 	TargetFd   uint32
@@ -738,6 +795,29 @@ func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) {
 	return NewFD(int(fd))
 }

+type LinkCreateUprobeMultiAttr struct {
+	ProgFd           uint32
+	TargetFd         uint32
+	AttachType       AttachType
+	Flags            uint32
+	Path             Pointer
+	Offsets          Pointer
+	RefCtrOffsets    Pointer
+	Cookies          Pointer
+	Count            uint32
+	UprobeMultiFlags uint32
+	Pid              uint32
+	_                [4]byte
+}
+
+func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) {
+	fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	if err != nil {
+		return nil, err
+	}
+	return NewFD(int(fd))
+}
+
 type LinkUpdateAttr struct {
 	LinkFd    uint32
 	NewProgFd uint32
@@ -971,13 +1051,13 @@ func ObjPin(attr *ObjPinAttr) error {
 }

 type ProgAttachAttr struct {
-	TargetFd         uint32
-	AttachBpfFd      uint32
-	AttachType       uint32
-	AttachFlags      uint32
-	ReplaceBpfFd     uint32
-	RelativeFd       uint32
-	ExpectedRevision uint64
+	TargetFdOrIfindex uint32
+	AttachBpfFd       uint32
+	AttachType        uint32
+	AttachFlags       uint32
+	ReplaceBpfFd      uint32
+	RelativeFdOrId    uint32
+	ExpectedRevision  uint64
 }

 func ProgAttach(attr *ProgAttachAttr) error {
@@ -997,9 +1077,13 @@ func ProgBindMap(attr *ProgBindMapAttr) error {
 }

 type ProgDetachAttr struct {
-	TargetFd    uint32
-	AttachBpfFd uint32
-	AttachType  uint32
+	TargetFdOrIfindex uint32
+	AttachBpfFd       uint32
+	AttachType        uint32
+	AttachFlags       uint32
+	_                 [4]byte
+	RelativeFdOrId    uint32
+	ExpectedRevision  uint64
 }

 func ProgDetach(attr *ProgDetachAttr) error {
@@ -1065,17
+1149,17 @@ func ProgLoad(attr *ProgLoadAttr) (*FD, error) { } type ProgQueryAttr struct { - TargetFd uint32 - AttachType AttachType - QueryFlags uint32 - AttachFlags uint32 - ProgIds Pointer - ProgCount uint32 - _ [4]byte - ProgAttachFlags Pointer - LinkIds Pointer - LinkAttachFlags Pointer - Revision uint64 + TargetFdOrIfindex uint32 + AttachType AttachType + QueryFlags uint32 + AttachFlags uint32 + ProgIds Pointer + Count uint32 + _ [4]byte + ProgAttachFlags Pointer + LinkIds Pointer + LinkAttachFlags Pointer + Revision uint64 } func ProgQuery(attr *ProgQueryAttr) error { @@ -1137,12 +1221,24 @@ type NetNsLinkInfo struct { AttachType AttachType } +type NetfilterLinkInfo struct { + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 +} + type RawTracepointLinkInfo struct { TpName Pointer TpNameLen uint32 _ [4]byte } +type TcxLinkInfo struct { + Ifindex uint32 + AttachType AttachType +} + type TracingLinkInfo struct { AttachType AttachType TargetObjId uint32 diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go index c6959d9cc97..d184ea196ae 100644 --- a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go @@ -32,6 +32,7 @@ func UnsafeBuffer(ptr unsafe.Pointer) Buffer { // SyscallOutput prepares a Buffer for a syscall to write into. // +// size is the length of the desired buffer in bytes. // The buffer may point at the underlying memory of dst, in which case [Unmarshal] // becomes a no-op. // @@ -53,6 +54,11 @@ func (b Buffer) CopyTo(dst []byte) int { return copy(dst, b.unsafeBytes()) } +// AppendTo appends the buffer onto dst. +func (b Buffer) AppendTo(dst []byte) []byte { + return append(dst, b.unsafeBytes()...) +} + // Pointer returns the location where a syscall should write. func (b Buffer) Pointer() sys.Pointer { // NB: This deliberately ignores b.length to support zero-copy diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go index 52339456aaf..0026af8f24f 100644 --- a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go @@ -7,12 +7,11 @@ import ( "errors" "fmt" "reflect" + "slices" "sync" "unsafe" "github.com/cilium/ebpf/internal" - - "golang.org/x/exp/slices" ) // Marshal turns data into a byte slice using the system's native endianness. diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go index 1b45a9a7427..897740fec0c 100644 --- a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go @@ -8,6 +8,7 @@ import ( "path/filepath" "runtime" "strings" + "sync" "syscall" "github.com/cilium/ebpf/internal" @@ -110,7 +111,7 @@ func sanitizeTracefsPath(path ...string) (string, error) { // Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing, // but may be also be available at /sys/kernel/debug/tracing if debugfs is mounted. // The available tracefs paths will depends on distribution choices. 
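For context on the hunk that follows: `getTracefsPath` probes each candidate mount point named in the comment above and accepts it only if the filesystem magic matches. A hedged sketch of that detection; `tracefsRoot` is an illustrative name and the magic constants come from `golang.org/x/sys/unix`:

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// tracefsRoot returns the first mount point whose filesystem magic matches.
// Older kernels only expose tracing via debugfs, hence the third candidate.
func tracefsRoot() (string, error) {
	candidates := []struct {
		path   string
		fsType int64
	}{
		{"/sys/kernel/tracing", unix.TRACEFS_MAGIC},
		{"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC},
		{"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC},
	}
	for _, c := range candidates {
		var fs unix.Statfs_t
		if err := unix.Statfs(c.path, &fs); err != nil {
			continue
		}
		if int64(fs.Type) == c.fsType {
			return c.path, nil
		}
	}
	return "", errors.New("neither tracefs nor debugfs is mounted")
}

func main() {
	fmt.Println(tracefsRoot())
}
```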
-var getTracefsPath = internal.Memoize(func() (string, error) { +var getTracefsPath = sync.OnceValues(func() (string, error) { for _, p := range []struct { path string fsType int64 diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index 51ed7d0597e..2f22b1278ed 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -25,6 +25,7 @@ const ( EACCES = linux.EACCES EILSEQ = linux.EILSEQ EOPNOTSUPP = linux.EOPNOTSUPP + ESTALE = linux.ESTALE ) const ( @@ -39,6 +40,7 @@ const ( BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN BPF_TAG_SIZE = linux.BPF_TAG_SIZE BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index 1760e9e796b..e5bad046986 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -27,6 +27,7 @@ const ( EACCES EILSEQ EOPNOTSUPP + ESTALE ) // Constants are distinct to avoid breaking switch statements. @@ -41,6 +42,7 @@ const ( BPF_F_MMAPABLE BPF_F_INNER_MAP BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN BPF_F_XDP_HAS_FRAGS BPF_OBJ_NAME_LEN BPF_TAG_SIZE diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go index c444a41c4b1..1049278554e 100644 --- a/vendor/github.com/cilium/ebpf/internal/vdso.go +++ b/vendor/github.com/cilium/ebpf/internal/vdso.go @@ -8,7 +8,6 @@ import ( "io" "math" "os" - "unsafe" "github.com/cilium/ebpf/internal/unix" ) @@ -20,21 +19,14 @@ var ( // vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library // linked into the current process image. func vdsoVersion() (uint32, error) { - const uintptrIs32bits = unsafe.Sizeof((uintptr)(0)) == 4 - - // Read data from the auxiliary vector, which is normally passed directly - // to the process. Go does not expose that data, so we must read it from procfs. 
- // https://man7.org/linux/man-pages/man3/getauxval.3.html - av, err := os.Open("/proc/self/auxv") - if errors.Is(err, unix.EACCES) { - return 0, fmt.Errorf("opening auxv: %w (process may not be dumpable due to file capabilities)", err) - } + av, err := newAuxvRuntimeReader() if err != nil { - return 0, fmt.Errorf("opening auxv: %w", err) + return 0, err } + defer av.Close() - vdsoAddr, err := vdsoMemoryAddress(av, NativeEndian, uintptrIs32bits) + vdsoAddr, err := vdsoMemoryAddress(av) if err != nil { return 0, fmt.Errorf("finding vDSO memory address: %w", err) } @@ -55,43 +47,13 @@ func vdsoVersion() (uint32, error) { return c, nil } -type auxvPair32 struct { - Tag, Value uint32 -} - -type auxvPair64 struct { - Tag, Value uint64 -} - -func readAuxvPair(r io.Reader, order binary.ByteOrder, uintptrIs32bits bool) (tag, value uint64, _ error) { - if uintptrIs32bits { - var aux auxvPair32 - if err := binary.Read(r, order, &aux); err != nil { - return 0, 0, fmt.Errorf("reading auxv entry: %w", err) - } - return uint64(aux.Tag), uint64(aux.Value), nil - } - - var aux auxvPair64 - if err := binary.Read(r, order, &aux); err != nil { - return 0, 0, fmt.Errorf("reading auxv entry: %w", err) - } - return aux.Tag, aux.Value, nil -} - // vdsoMemoryAddress returns the memory address of the vDSO library // linked into the current process image. r is an io.Reader into an auxv blob. -func vdsoMemoryAddress(r io.Reader, order binary.ByteOrder, uintptrIs32bits bool) (uintptr, error) { - // See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h - const ( - _AT_NULL = 0 // End of vector - _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image - ) - +func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) { // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, // the address of a page containing the virtual Dynamic Shared Object (vDSO). for { - tag, value, err := readAuxvPair(r, order, uintptrIs32bits) + tag, value, err := r.ReadAuxvPair() if err != nil { return 0, err } diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go index 9b17ffb44de..acd4650af73 100644 --- a/vendor/github.com/cilium/ebpf/internal/version.go +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -2,6 +2,7 @@ package internal import ( "fmt" + "sync" "github.com/cilium/ebpf/internal/unix" ) @@ -79,7 +80,7 @@ func (v Version) Kernel() uint32 { } // KernelVersion returns the version of the currently running kernel. 
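The vDSO hunks above replace the hand-rolled `/proc/self/auxv` parsing with a `newAuxvRuntimeReader` abstraction, but the underlying idea is unchanged: walk the auxiliary vector's tag/value pairs until `AT_SYSINFO_EHDR`, whose value is the address of the vDSO mapping. A simplified 64-bit sketch, assuming procfs is readable; the real reader also handles the 32-bit layout and a runtime-based fast path:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

const (
	_AT_NULL         = 0  // end of the auxiliary vector
	_AT_SYSINFO_EHDR = 33 // address of the vDSO mapping
)

// vdsoAddr scans /proc/self/auxv for AT_SYSINFO_EHDR. Entries are native
// endian, two machine words each (8 bytes apiece on 64-bit).
func vdsoAddr() (uint64, error) {
	buf, err := os.ReadFile("/proc/self/auxv")
	if err != nil {
		return 0, err
	}
	for i := 0; i+16 <= len(buf); i += 16 {
		tag := binary.NativeEndian.Uint64(buf[i:])
		val := binary.NativeEndian.Uint64(buf[i+8:])
		switch tag {
		case _AT_SYSINFO_EHDR:
			return val, nil
		case _AT_NULL:
			return 0, fmt.Errorf("auxv: no AT_SYSINFO_EHDR entry")
		}
	}
	return 0, fmt.Errorf("auxv: truncated")
}

func main() {
	fmt.Println(vdsoAddr())
}
```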
-var KernelVersion = Memoize(func() (Version, error) { +var KernelVersion = sync.OnceValues(func() (Version, error) { return detectKernelVersion() }) diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go index 58e85fe9d47..79f3d2b7f4c 100644 --- a/vendor/github.com/cilium/ebpf/link/cgroup.go +++ b/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -143,8 +143,7 @@ func (cg *progAttachCgroup) Update(prog *ebpf.Program) error { // Atomically replacing multiple programs requires at least // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf // program in MULTI mode") - args.Flags |= uint32(flagReplace) - args.Replace = cg.current + args.Anchor = ReplaceProgram(cg.current) } if err := RawAttachProgram(args); err != nil { diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go index 36acd6ee4b9..03368a2019c 100644 --- a/vendor/github.com/cilium/ebpf/link/link.go +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -96,8 +96,14 @@ func wrapRawLink(raw *RawLink) (_ Link, err error) { return &NetNsLink{*raw}, nil case KprobeMultiType: return &kprobeMultiLink{*raw}, nil + case UprobeMultiType: + return &uprobeMultiLink{*raw}, nil case PerfEventType: return nil, fmt.Errorf("recovering perf event fd: %w", ErrNotSupported) + case TCXType: + return &tcxLink{*raw}, nil + case NetfilterType: + return &netfilterLink{*raw}, nil default: return raw, nil } @@ -132,6 +138,8 @@ type TracingInfo sys.TracingLinkInfo type CgroupInfo sys.CgroupLinkInfo type NetNsInfo sys.NetNsLinkInfo type XDPInfo sys.XDPLinkInfo +type TCXInfo sys.TcxLinkInfo +type NetfilterInfo sys.NetfilterLinkInfo // Tracing returns tracing type-specific link info. // @@ -157,7 +165,7 @@ func (r Info) NetNs() *NetNsInfo { return e } -// ExtraNetNs returns XDP type-specific link info. +// XDP returns XDP type-specific link info. // // Returns nil if the type-specific link info isn't available. func (r Info) XDP() *XDPInfo { @@ -165,6 +173,22 @@ func (r Info) XDP() *XDPInfo { return e } +// TCX returns TCX type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) TCX() *TCXInfo { + e, _ := r.extra.(*TCXInfo) + return e +} + +// Netfilter returns netfilter type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netfilter() *NetfilterInfo { + e, _ := r.extra.(*NetfilterInfo) + return e +} + // RawLink is the low-level API to bpf_link. // // You should consider using the higher level interfaces in this @@ -313,8 +337,12 @@ func (l *RawLink) Info() (*Info, error) { case XDPType: extra = &XDPInfo{} case RawTracepointType, IterType, - PerfEventType, KprobeMultiType: + PerfEventType, KprobeMultiType, UprobeMultiType: // Extra metadata not supported. + case TCXType: + extra = &TCXInfo{} + case NetfilterType: + extra = &NetfilterInfo{} default: return nil, fmt.Errorf("unknown link info type: %d", info.Type) } diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go index 053735a6773..d8a2a15f937 100644 --- a/vendor/github.com/cilium/ebpf/link/program.go +++ b/vendor/github.com/cilium/ebpf/link/program.go @@ -2,22 +2,27 @@ package link import ( "fmt" + "runtime" "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal/sys" ) type RawAttachProgramOptions struct { - // File descriptor to attach to. This differs for each attach type. + // Target to query. 
This is usually a file descriptor but may refer to + // something else based on the attach type. Target int // Program to attach. Program *ebpf.Program - // Program to replace (cgroups). - Replace *ebpf.Program - // Attach must match the attach type of Program (and Replace). + // Attach must match the attach type of Program. Attach ebpf.AttachType - // Flags control the attach behaviour. This differs for each attach type. + // Attach relative to an anchor. Optional. + Anchor Anchor + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional. Flags uint32 + // Only attach if the internal revision matches the given value. + ExpectedRevision uint64 } // RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH. @@ -25,45 +30,72 @@ type RawAttachProgramOptions struct { // You should use one of the higher level abstractions available in this // package if possible. func RawAttachProgram(opts RawAttachProgramOptions) error { - var replaceFd uint32 - if opts.Replace != nil { - replaceFd = uint32(opts.Replace.FD()) + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") } attr := sys.ProgAttachAttr{ - TargetFd: uint32(opts.Target), - AttachBpfFd: uint32(opts.Program.FD()), - ReplaceBpfFd: replaceFd, - AttachType: uint32(opts.Attach), - AttachFlags: uint32(opts.Flags), + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + AttachFlags: uint32(opts.Flags), + ExpectedRevision: opts.ExpectedRevision, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("attach program: %w", err) + } + + if flags == sys.BPF_F_REPLACE { + // Ensure that replacing a program works on old kernels. + attr.ReplaceBpfFd = fdOrID + } else { + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } } if err := sys.ProgAttach(&attr); err != nil { if haveFeatErr := haveProgAttach(); haveFeatErr != nil { return haveFeatErr } - return fmt.Errorf("can't attach program: %w", err) + return fmt.Errorf("attach program: %w", err) } + runtime.KeepAlive(opts.Program) return nil } -type RawDetachProgramOptions struct { - Target int - Program *ebpf.Program - Attach ebpf.AttachType -} +type RawDetachProgramOptions RawAttachProgramOptions // RawDetachProgram is a low level wrapper around BPF_PROG_DETACH. // // You should use one of the higher level abstractions available in this // package if possible. 
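With the hunk above, replacing an attached program is no longer expressed through the dedicated `Replace` field plus an `F_REPLACE` flag: callers pass an `Anchor` such as `ReplaceProgram`, and `RawAttachProgram` folds it back into the legacy `ReplaceBpfFd` attribute on kernels that predate bpf_mprog. A sketch of a migrated call site; apart from `RawAttachProgram`, `RawAttachProgramOptions` and `ReplaceProgram`, the names are illustrative:

```go
package attach

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// replaceIngress atomically swaps oldProg for newProg on an already-open
// cgroup directory fd.
func replaceIngress(cgroupFD int, oldProg, newProg *ebpf.Program) error {
	return link.RawAttachProgram(link.RawAttachProgramOptions{
		Target:  cgroupFD,
		Program: newProg,
		Attach:  ebpf.AttachCGroupInetIngress,
		// Encoded as ReplaceBpfFd on old kernels, and as RelativeFdOrId
		// plus BPF_F_REPLACE on bpf_mprog-aware ones.
		Anchor: link.ReplaceProgram(oldProg),
	})
}
```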
func RawDetachProgram(opts RawDetachProgramOptions) error { + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + attr := sys.ProgDetachAttr{ - TargetFd: uint32(opts.Target), - AttachBpfFd: uint32(opts.Program.FD()), - AttachType: uint32(opts.Attach), + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + ExpectedRevision: opts.ExpectedRevision, } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("detach program: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } + if err := sys.ProgDetach(&attr); err != nil { if haveFeatErr := haveProgAttach(); haveFeatErr != nil { return haveFeatErr diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go index c05656512d5..fe534f8efad 100644 --- a/vendor/github.com/cilium/ebpf/link/query.go +++ b/vendor/github.com/cilium/ebpf/link/query.go @@ -2,7 +2,6 @@ package link import ( "fmt" - "os" "unsafe" "github.com/cilium/ebpf" @@ -11,53 +10,102 @@ import ( // QueryOptions defines additional parameters when querying for programs. type QueryOptions struct { - // Path can be a path to a cgroup, netns or LIRC2 device - Path string + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. + Target int // Attach specifies the AttachType of the programs queried for Attach ebpf.AttachType // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE QueryFlags uint32 } -// QueryPrograms retrieves ProgramIDs associated with the AttachType. -// -// Returns (nil, nil) if there are no programs attached to the queried kernel -// resource. Calling QueryPrograms on a kernel missing PROG_QUERY will result in -// ErrNotSupported. -func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) { - if haveProgQuery() != nil { - return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported) - } +// QueryResult describes which programs and links are active. +type QueryResult struct { + // List of attached programs. + Programs []AttachedProgram - f, err := os.Open(opts.Path) - if err != nil { - return nil, fmt.Errorf("can't open file: %s", err) - } - defer f.Close() + // Incremented by one every time the set of attached programs changes. + // May be zero if not supported by the [ebpf.AttachType]. + Revision uint64 +} + +// HaveLinkInfo returns true if the kernel supports querying link information +// for a particular [ebpf.AttachType]. +func (qr *QueryResult) HaveLinkInfo() bool { + return qr.Revision > 0 +} + +type AttachedProgram struct { + ID ebpf.ProgramID + linkID ID +} + +// LinkID returns the ID associated with the program. +// +// Returns 0, false if the kernel doesn't support retrieving the ID or if the +// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you +// need to tell the two apart. +func (ap *AttachedProgram) LinkID() (ID, bool) { + return ap.linkID, ap.linkID != 0 +} +// QueryPrograms retrieves a list of programs for the given AttachType. +// +// Returns a slice of attached programs, which may be empty. +// revision counts how many times the set of attached programs has changed and +// may be zero if not supported by the [ebpf.AttachType]. 
+// Returns ErrNotSupported on a kernel without BPF_PROG_QUERY. +func QueryPrograms(opts QueryOptions) (*QueryResult, error) { // query the number of programs to allocate correct slice size attr := sys.ProgQueryAttr{ - TargetFd: uint32(f.Fd()), - AttachType: sys.AttachType(opts.Attach), - QueryFlags: opts.QueryFlags, + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, } - if err := sys.ProgQuery(&attr); err != nil { - return nil, fmt.Errorf("can't query program count: %w", err) + err := sys.ProgQuery(&attr) + if err != nil { + if haveFeatErr := haveProgQuery(); haveFeatErr != nil { + return nil, fmt.Errorf("query programs: %w", haveFeatErr) + } + return nil, fmt.Errorf("query programs: %w", err) } + if attr.Count == 0 { + return &QueryResult{Revision: attr.Revision}, nil + } + + // The minimum bpf_mprog revision is 1, so we can use the field to detect + // whether the attach type supports link ids. + haveLinkIDs := attr.Revision != 0 - // return nil if no progs are attached - if attr.ProgCount == 0 { - return nil, nil + count := attr.Count + progIds := make([]ebpf.ProgramID, count) + attr = sys.ProgQueryAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, + Count: count, + ProgIds: sys.NewPointer(unsafe.Pointer(&progIds[0])), + } + + var linkIds []ID + if haveLinkIDs { + linkIds = make([]ID, count) + attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0])) } - // we have at least one prog, so we query again - progIds := make([]ebpf.ProgramID, attr.ProgCount) - attr.ProgIds = sys.NewPointer(unsafe.Pointer(&progIds[0])) - attr.ProgCount = uint32(len(progIds)) if err := sys.ProgQuery(&attr); err != nil { - return nil, fmt.Errorf("can't query program IDs: %w", err) + return nil, fmt.Errorf("query programs: %w", err) } - return progIds, nil + // NB: attr.Count might have changed between the two syscalls. + var programs []AttachedProgram + for i, id := range progIds[:attr.Count] { + ap := AttachedProgram{ID: id} + if haveLinkIDs { + ap.linkID = linkIds[i] + } + programs = append(programs, ap) + } + return &QueryResult{programs, attr.Revision}, nil } diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go index 012970ec78e..53476c925b0 100644 --- a/vendor/github.com/cilium/ebpf/link/syscalls.go +++ b/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -24,6 +24,9 @@ const ( XDPType = sys.BPF_LINK_TYPE_XDP PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI + TCXType = sys.BPF_LINK_TYPE_TCX + UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI + NetfilterType = sys.BPF_LINK_TYPE_NETFILTER ) var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error { @@ -72,10 +75,10 @@ var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic repl // present. attr := sys.ProgAttachAttr{ // We rely on this being checked after attachFlags. - TargetFd: ^uint32(0), - AttachBpfFd: uint32(prog.FD()), - AttachType: uint32(ebpf.AttachCGroupInetIngress), - AttachFlags: uint32(flagReplace), + TargetFdOrIfindex: ^uint32(0), + AttachBpfFd: uint32(prog.FD()), + AttachType: uint32(ebpf.AttachCGroupInetIngress), + AttachFlags: uint32(flagReplace), } err = sys.ProgAttach(&attr) @@ -110,8 +113,8 @@ var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() err // We rely on this being checked during the syscall.
// With an otherwise correct payload we expect EBADF here // as an indication that the feature is present. - TargetFd: ^uint32(0), - AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), + TargetFdOrIfindex: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), } err := sys.ProgQuery(&attr) @@ -124,3 +127,38 @@ var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() err } return errors.New("syscall succeeded unexpectedly") }) + +var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateTcxAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachTCXIngress), + } + + _, err = sys.LinkCreateTcx(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}) diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go index 83977e0e54d..ad85024e38e 100644 --- a/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -36,10 +36,10 @@ var ( type Executable struct { // Path of the executable on the filesystem. path string - // Parsed ELF and dynamic symbols' addresses. - addresses map[string]uint64 + // Parsed ELF and dynamic symbols' cachedAddresses. + cachedAddresses map[string]uint64 // Keep track of symbol table lazy load. - addressesOnce sync.Once + cacheAddressesOnce sync.Once } // UprobeOptions defines additional parameters that will be used @@ -108,8 +108,8 @@ func OpenExecutable(path string) (*Executable, error) { } return &Executable{ - path: path, - addresses: make(map[string]uint64), + path: path, + cachedAddresses: make(map[string]uint64), }, nil } @@ -153,7 +153,7 @@ func (ex *Executable) load(f *internal.SafeELFFile) error { } } - ex.addresses[s.Name] = address + ex.cachedAddresses[s.Name] = address } return nil @@ -162,13 +162,13 @@ func (ex *Executable) load(f *internal.SafeELFFile) error { // address calculates the address of a symbol in the executable. // // opts must not be nil. 
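The `haveTCX` probe above detects the multi-program tcx hook added in kernel 6.6 by issuing `BPF_LINK_CREATE` against an invalid ifindex and treating `ENODEV` as proof that the attach type itself was accepted. Attaching through the supported path looks roughly like the sketch below, assuming the `link.AttachTCX` and `link.TCXOptions` API that accompanies the new `TCXType` link:

```go
package attach

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachTCXIngress attaches a SchedCLS program to an interface via tcx.
// TCXOptions also accepts an Anchor and ExpectedRevision for ordered,
// race-free attachment next to other programs.
func attachTCXIngress(prog *ebpf.Program, ifindex int) (link.Link, error) {
	return link.AttachTCX(link.TCXOptions{
		Interface: ifindex,
		Program:   prog,
		Attach:    ebpf.AttachTCXIngress,
	})
}
```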
-func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) { - if opts.Address > 0 { - return opts.Address + opts.Offset, nil +func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) { + if address > 0 { + return address + offset, nil } var err error - ex.addressesOnce.Do(func() { + ex.cacheAddressesOnce.Do(func() { var f *internal.SafeELFFile f, err = internal.OpenSafeELFFile(ex.path) if err != nil { @@ -183,7 +183,7 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error return 0, fmt.Errorf("lazy load symbols: %w", err) } - address, ok := ex.addresses[symbol] + address, ok := ex.cachedAddresses[symbol] if !ok { return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) } @@ -199,7 +199,7 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) } - return address + opts.Offset, nil + return address + offset, nil } // Uprobe attaches the given eBPF program to a perf event that fires when the @@ -284,7 +284,7 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti opts = &UprobeOptions{} } - offset, err := ex.address(symbol, opts) + offset, err := ex.address(symbol, opts.Address, opts.Offset) if err != nil { return nil, err } diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go index cf5b02ddf58..b93ca913ec0 100644 --- a/vendor/github.com/cilium/ebpf/linker.go +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -6,8 +6,7 @@ import ( "fmt" "io" "math" - - "golang.org/x/exp/slices" + "slices" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" @@ -120,7 +119,7 @@ func hasFunctionReferences(insns asm.Instructions) bool { // // Passing a nil target will relocate against the running kernel. insns are // modified in place. 
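`applyRelocations` (reworked in the next hunk) now threads a `btf.Builder` into `btf.CORERelocate`, so relocations that need a stable local type ID can ship that BTF alongside the program. From the public API, the relocation target is still chosen via `ProgramOptions.KernelTypes`; a sketch of loading against BTF taken from a file instead of the running kernel, where `loadAgainstBTF` is an illustrative name:

```go
package core

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/btf"
)

// loadAgainstBTF applies CO-RE relocations against external BTF, e.g. a
// copy of /sys/kernel/btf/vmlinux taken from the target machine.
func loadAgainstBTF(spec *ebpf.CollectionSpec, btfPath string) (*ebpf.Collection, error) {
	target, err := btf.LoadSpec(btfPath)
	if err != nil {
		return nil, err
	}
	return ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
		Programs: ebpf.ProgramOptions{KernelTypes: target},
	})
}
```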
-func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOrder) error { +func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOrder, b *btf.Builder) error { var relos []*btf.CORERelocation var reloInsns []*asm.Instruction iter := insns.Iterate() @@ -139,7 +138,7 @@ func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOr bo = internal.NativeEndian } - fixups, err := btf.CORERelocate(relos, target, bo) + fixups, err := btf.CORERelocate(relos, target, bo, b.Add) if err != nil { return err } diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go index ce945ace083..1eeee1776d3 100644 --- a/vendor/github.com/cilium/ebpf/map.go +++ b/vendor/github.com/cilium/ebpf/map.go @@ -10,6 +10,7 @@ import ( "path/filepath" "reflect" "strings" + "sync" "time" "unsafe" @@ -133,7 +134,7 @@ func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { spec.KeySize = 4 spec.ValueSize = 4 - n, err := internal.PossibleCPUs() + n, err := PossibleCPU() if err != nil { return nil, fmt.Errorf("fixup perf event array: %w", err) } @@ -515,7 +516,7 @@ func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries return m, nil } - possibleCPUs, err := internal.PossibleCPUs() + possibleCPUs, err := PossibleCPU() if err != nil { return nil, err } @@ -642,11 +643,15 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) { } func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut, int(m.valueSize)) + if err != nil { + return err + } valueBytes := make([]byte, m.fullValueSize) if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil { return err } - return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes) + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) } func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { @@ -669,11 +674,53 @@ func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags } func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut, int(m.valueSize)) + if err != nil { + return err + } valueBytes := make([]byte, m.fullValueSize) if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil { return err } - return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes) + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary. +func ensurePerCPUSlice(sliceOrPtr any, elemLength int) (any, error) { + sliceOrPtrType := reflect.TypeOf(sliceOrPtr) + if sliceOrPtrType.Kind() == reflect.Slice { + // The target is a slice, the caller is responsible for ensuring that + // size is correct. 
+ return sliceOrPtr, nil + } + + slicePtrType := sliceOrPtrType + if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { + return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + sliceType := slicePtrType.Elem() + slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + reflect.ValueOf(sliceOrPtr).Elem().Set(slice) + if !sliceElemIsPointer { + return slice.Interface(), nil + } + sliceElemType = sliceElemType.Elem() + + for i := 0; i < possibleCPUs; i++ { + newElem := reflect.New(sliceElemType) + slice.Index(i).Set(newElem) + } + + return slice.Interface(), nil } func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error { @@ -861,7 +908,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { return nil } -var mmapProtectedPage = internal.Memoize(func() ([]byte, error) { +var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) { return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED) }) @@ -917,14 +964,19 @@ func (m *Map) guessNonExistentKey() ([]byte, error) { // // "keysOut" and "valuesOut" must be of type slice, a pointer // to a slice or buffer will not work. -// "prevKey" is the key to start the batch lookup from, it will -// *not* be included in the results. Use nil to start at the first key. +// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regard to +// different map type implementations. // // ErrKeyNotExist is returned when the batch lookup has reached // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". -func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) +func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts) } // BatchLookupAndDelete looks up many elements in a map at once, @@ -932,47 +984,113 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o // It then deletes all those elements. // "keysOut" and "valuesOut" must be of type slice, a pointer // to a slice or buffer will not work. -// "prevKey" is the key to start the batch lookup from, it will -// *not* be included in the results. Use nil to start at the first key. +// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regard to +// different map type implementations. // // ErrKeyNotExist is returned when the batch lookup has reached // the end of all possible results, even when partial results // are returned.
It should be used to evaluate when lookup is "done". -func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) +func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts) } -func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - if err := haveBatchAPI(); err != nil { - return 0, err - } +// MapBatchCursor represents a starting point for a batch operation. +type MapBatchCursor struct { + m *Map + opaque []byte +} + +func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported + return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts) } - keysValue := reflect.ValueOf(keysOut) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") + + count, err := batchCount(keysOut, valuesOut) + if err != nil { + return 0, err } - valuesValue := reflect.ValueOf(valuesOut) - if valuesValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("valuesOut must be a slice") + + valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) + + n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if err != nil { + return n, err } - count := keysValue.Len() - if count != valuesValue.Len() { - return 0, fmt.Errorf("keysOut and valuesOut must be the same length") + + err = valueBuf.Unmarshal(valuesOut) + if err != nil { + return 0, err } - keyBuf := make([]byte, count*int(m.keySize)) - keyPtr := sys.NewSlicePointer(keyBuf) + + return n, nil +} + +func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keysOut) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + valueBuf := make([]byte, count*int(m.fullValueSize)) valuePtr := sys.NewSlicePointer(valueBuf) - nextBuf := makeMapSyscallOutput(nextKeyOut, int(m.keySize)) + + n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr + } + + err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf) + if err != nil { + return 0, err + } + + return n, sysErr +} + +func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + cursorLen := int(m.keySize) + if cursorLen < 4 { + // * generic_map_lookup_batch requires that batch_out is key_size bytes. + // This is used by array and LPM maps. + // + // * __htab_map_lookup_and_delete_batch requires u32. This is used by the + // various hash maps. + // + // Use a minimum of 4 bytes to avoid having to distinguish between the two. + cursorLen = 4 + } + + inBatch := cursor.opaque + if inBatch == nil { + // This is the first lookup, allocate a buffer to hold the cursor. + cursor.opaque = make([]byte, cursorLen) + cursor.m = m + } else if cursor.m != m { + // Prevent reuse of a cursor across maps. First, it's unlikely to work. + // Second, the maps may require different cursorLen and cursor.opaque + // may therefore be too short.
This could lead to the kernel clobbering + // user space memory. + return 0, errors.New("a cursor may not be reused across maps") + } + + if err := haveBatchAPI(); err != nil { + return 0, err + } + + keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize)) attr := sys.MapLookupBatchAttr{ MapFd: m.fd.Uint(), - Keys: keyPtr, + Keys: keyBuf.Pointer(), Values: valuePtr, Count: uint32(count), - OutBatch: nextBuf.Pointer(), + InBatch: sys.NewSlicePointer(inBatch), + OutBatch: sys.NewSlicePointer(cursor.opaque), } if opts != nil { @@ -980,30 +1098,13 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut attr.Flags = opts.Flags } - var err error - if startKey != nil { - attr.InBatch, err = marshalMapSyscallInput(startKey, int(m.keySize)) - if err != nil { - return 0, err - } - } - _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) sysErr = wrapMapError(sysErr) if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { return 0, sysErr } - err = nextBuf.Unmarshal(nextKeyOut) - if err != nil { - return 0, err - } - err = sysenc.Unmarshal(keysOut, keyBuf) - if err != nil { - return 0, err - } - err = sysenc.Unmarshal(valuesOut, valueBuf) - if err != nil { + if err := keyBuf.Unmarshal(keysOut); err != nil { return 0, err } @@ -1016,29 +1117,24 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut // to a slice or buffer will not work. func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) { if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported + return m.batchUpdatePerCPU(keys, values, opts) } - keysValue := reflect.ValueOf(keys) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") - } - valuesValue := reflect.ValueOf(values) - if valuesValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("values must be a slice") - } - var ( - count = keysValue.Len() - valuePtr sys.Pointer - err error - ) - if count != valuesValue.Len() { - return 0, fmt.Errorf("keys and values must be the same length") + + count, err := batchCount(keys, values) + if err != nil { + return 0, err } - keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + + valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize)) if err != nil { return 0, err } - valuePtr, err = marshalMapSyscallInput(values, count*int(m.valueSize)) + + return m.batchUpdate(count, keys, valuePtr, opts) +} + +func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) if err != nil { return 0, err } @@ -1065,17 +1161,28 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er return int(attr.Count), nil } +func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts) +} + // BatchDelete batch deletes entries in the map by keys. // "keys" must be of type slice, a pointer to a slice or buffer will not work. 
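The batch API reworked above drops the `prevKey`/`nextKeyOut` pair in favour of an opaque `MapBatchCursor`, and per-CPU maps are now routed through `batchLookupPerCPU` instead of failing with `ErrNotSupported`. Draining a map now looks roughly like this sketch, assuming uint32 keys and values:

```go
package batch

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
)

// drain reads a map in chunks of 64. The zero MapBatchCursor starts at the
// beginning; ErrKeyNotExist marks the end, possibly together with a final
// partial chunk of n entries.
func drain(m *ebpf.Map) error {
	var cursor ebpf.MapBatchCursor
	keys := make([]uint32, 64)
	vals := make([]uint32, 64)
	for {
		n, err := m.BatchLookup(&cursor, keys, vals, nil)
		for i := 0; i < n; i++ {
			fmt.Println(keys[i], vals[i])
		}
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			return nil
		}
		if err != nil {
			return err
		}
	}
}
```

As the checks above enforce, a cursor is bound to the first map it is used with and must not be reused across maps.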
func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { - if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported - } - keysValue := reflect.ValueOf(keys) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) } - count := keysValue.Len() + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) if err != nil { return 0, fmt.Errorf("cannot marshal keys: %v", err) @@ -1102,6 +1209,24 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { return int(attr.Count), nil } +func batchCount(keys, values any) (int, error) { + keysLen, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valuesLen, err := sliceLen(values) + if err != nil { + return 0, fmt.Errorf("values: %w", err) + } + + if keysLen != valuesLen { + return 0, fmt.Errorf("keys and values must have the same length") + } + + return keysLen, nil +} + // Iterate traverses a map. // // It's safe to create multiple iterators at the same time. @@ -1365,8 +1490,10 @@ func marshalMap(m *Map, length int) ([]byte, error) { // // See Map.Iterate. type MapIterator struct { - target *Map - curKey []byte + target *Map + // Temporary storage to avoid allocations in Next(). This is any instead + // of []byte to avoid allocations. + cursor any count, maxEntries uint32 done bool err error @@ -1394,34 +1521,30 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { return false } - // For array-like maps NextKeyBytes returns nil only on after maxEntries + // For array-like maps NextKey returns nil only after maxEntries // iterations. for mi.count <= mi.maxEntries { - var nextKey []byte - if mi.curKey == nil { - // Pass nil interface to NextKeyBytes to make sure the Map's first key + if mi.cursor == nil { + // Pass nil interface to NextKey to make sure the Map's first key // is returned. If we pass an uninitialized []byte instead, it'll see a // non-nil interface and try to marshal it. - nextKey, mi.err = mi.target.NextKeyBytes(nil) - - mi.curKey = make([]byte, mi.target.keySize) + mi.cursor = make([]byte, mi.target.keySize) + mi.err = mi.target.NextKey(nil, mi.cursor) } else { - nextKey, mi.err = mi.target.NextKeyBytes(mi.curKey) - } - if mi.err != nil { - mi.err = fmt.Errorf("get next key: %w", mi.err) - return false + mi.err = mi.target.NextKey(mi.cursor, mi.cursor) } - if nextKey == nil { + if errors.Is(mi.err, ErrKeyNotExist) { mi.done = true + mi.err = nil + return false + } else if mi.err != nil { + mi.err = fmt.Errorf("get next key: %w", mi.err) return false } - mi.curKey = nextKey - mi.count++ - mi.err = mi.target.Lookup(nextKey, valueOut) + mi.err = mi.target.Lookup(mi.cursor, valueOut) if errors.Is(mi.err, ErrKeyNotExist) { // Even though the key should be valid, we couldn't look up // its value. If we're iterating a hash map this is probably @@ -1438,10 +1561,11 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { return false } + buf := mi.cursor.([]byte) if ptr, ok := keyOut.(unsafe.Pointer); ok { - copy(unsafe.Slice((*byte)(ptr), len(nextKey)), nextKey) + copy(unsafe.Slice((*byte)(ptr), len(buf)), buf) } else { - mi.err = sysenc.Unmarshal(keyOut, nextKey) + mi.err = sysenc.Unmarshal(keyOut, buf) } return mi.err == nil @@ -1481,3 +1605,12 @@ func NewMapFromID(id MapID) (*Map, error) { return newMapFromFD(fd) } + +// sliceLen returns the length if the value is a slice or an error otherwise. 
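The `MapIterator` changes above swap `NextKeyBytes` for `NextKey` with a reusable cursor buffer and detect the end of the map via `ErrKeyNotExist`; the public `Iterate` API is unchanged. Typical usage for reference, assuming uint32 keys and values:

```go
package iterate

import "github.com/cilium/ebpf"

// keysOf collects every key of m. Next reports false at the end of the map
// or on error, so Err must be checked afterwards.
func keysOf(m *ebpf.Map) ([]uint32, error) {
	var (
		keys []uint32
		k, v uint32
	)
	it := m.Iterate()
	for it.Next(&k, &v) {
		keys = append(keys, k)
	}
	return keys, it.Err()
}
```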
+func sliceLen(slice any) (int, error) { + sliceValue := reflect.ValueOf(slice) + if sliceValue.Kind() != reflect.Slice { + return 0, fmt.Errorf("%T is not a slice", slice) + } + return sliceValue.Len(), nil +} diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go index e89a12f0fb1..57a0a8e88af 100644 --- a/vendor/github.com/cilium/ebpf/marshalers.go +++ b/vendor/github.com/cilium/ebpf/marshalers.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "reflect" + "slices" "unsafe" "github.com/cilium/ebpf/internal" @@ -43,79 +44,125 @@ func makeMapSyscallOutput(dst any, length int) sysenc.Buffer { return sysenc.SyscallOutput(dst, length) } -// marshalPerCPUValue encodes a slice containing one value per +// appendPerCPUSlice encodes a slice containing one value per // possible CPU into a buffer of bytes. // // Values are initialized to zero if the slice has less elements than CPUs. -func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { +func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) { sliceType := reflect.TypeOf(slice) if sliceType.Kind() != reflect.Slice { - return sys.Pointer{}, errors.New("per-CPU value requires slice") - } - - possibleCPUs, err := internal.PossibleCPUs() - if err != nil { - return sys.Pointer{}, err + return nil, errors.New("per-CPU value requires slice") } sliceValue := reflect.ValueOf(slice) sliceLen := sliceValue.Len() if sliceLen > possibleCPUs { - return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") + return nil, fmt.Errorf("per-CPU value greater than number of CPUs") } - alignedElemLength := internal.Align(elemLength, 8) - buf := make([]byte, alignedElemLength*possibleCPUs) - + // Grow increases the slice's capacity, _if_necessary_ + buf = slices.Grow(buf, alignedElemLength*possibleCPUs) for i := 0; i < sliceLen; i++ { elem := sliceValue.Index(i).Interface() elemBytes, err := sysenc.Marshal(elem, elemLength) if err != nil { - return sys.Pointer{}, err + return nil, err } - offset := i * alignedElemLength - elemBytes.CopyTo(buf[offset : offset+elemLength]) + buf = elemBytes.AppendTo(buf) + buf = append(buf, make([]byte, alignedElemLength-elemLength)...) + } + + // Ensure buf is zero-padded full size. + buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...) + + return buf, nil +} + +// marshalPerCPUValue encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { + possibleCPUs, err := PossibleCPU() + if err != nil { + return sys.Pointer{}, err + } + + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, alignedElemLength*possibleCPUs) + buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return sys.Pointer{}, err } return sys.NewSlicePointer(buf), nil } +// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing +// one value per possible CPU into a buffer of bytes. 
+func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, fmt.Errorf("batch value requires a slice") + } + sliceValue := reflect.ValueOf(slice) + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + batchLen*possibleCPUs, sliceValue.Len()) + } + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs) + for i := 0; i < batchLen; i++ { + batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return nil, fmt.Errorf("batch %d: %w", i, err) + } + } + return buf, nil +} + // unmarshalPerCPUValue decodes a buffer into a slice containing one value per // possible CPU. // -// slicePtr must be a pointer to a slice. -func unmarshalPerCPUValue(slicePtr any, elemLength int, buf []byte) error { - slicePtrType := reflect.TypeOf(slicePtr) - if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { - return fmt.Errorf("per-cpu value requires pointer to slice") +// slice must be a literal slice and not a pointer. +func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("per-CPU value requires a slice") } - possibleCPUs, err := internal.PossibleCPUs() + possibleCPUs, err := PossibleCPU() if err != nil { return err } - sliceType := slicePtrType.Elem() - slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + sliceValue := reflect.ValueOf(slice) + if sliceValue.Len() != possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + possibleCPUs, sliceValue.Len()) + } sliceElemType := sliceType.Elem() sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr - if sliceElemIsPointer { - sliceElemType = sliceElemType.Elem() - } - stride := internal.Align(elemLength, 8) for i := 0; i < possibleCPUs; i++ { var elem any + v := sliceValue.Index(i) if sliceElemIsPointer { - newElem := reflect.New(sliceElemType) - slice.Index(i).Set(newElem) - elem = newElem.Interface() + if !v.Elem().CanAddr() { + return fmt.Errorf("per-CPU slice elements cannot be nil") + } + elem = v.Elem().Addr().Interface() } else { - elem = slice.Index(i).Addr().Interface() + elem = v.Addr().Interface() } - err := sysenc.Unmarshal(elem, buf[:elemLength]) if err != nil { return fmt.Errorf("cpu %d: %w", i, err) @@ -123,7 +170,41 @@ func unmarshalPerCPUValue(slicePtr any, elemLength int, buf []byte) error { buf = buf[stride:] } + return nil +} - reflect.ValueOf(slicePtr).Elem().Set(slice) +// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice +// containing one value per possible CPU. +// +// slice must have length batchLen * PossibleCPUs(). 
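`unmarshalPerCPUValue` now fills a caller-visible slice in place, one element per possible CPU with each value padded to 8 bytes in the kernel buffer, and `ensurePerCPUSlice` (earlier in this diff) allocates that slice when the caller passes a pointer to a nil one. A sketch of the user-facing effect, assuming a per-CPU map with 8-byte values:

```go
package percpu

import "github.com/cilium/ebpf"

// sumPerCPU adds up the per-CPU counters stored under key. Passing *[]uint64
// lets the library size the slice to PossibleCPU() elements.
func sumPerCPU(m *ebpf.Map, key uint32) (uint64, error) {
	var perCPU []uint64
	if err := m.Lookup(key, &perCPU); err != nil {
		return 0, err
	}
	var total uint64
	for _, v := range perCPU {
		total += v
	}
	return total, nil
}
```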
+func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("batch requires a slice") + } + + sliceValue := reflect.ValueOf(slice) + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + sliceValue.Len(), batchLen*possibleCPUs) + } + + fullValueSize := possibleCPUs * internal.Align(elemLength, 8) + if len(buf) != batchLen*fullValueSize { + return fmt.Errorf("input buffer has incorrect length, expected %d, got %d", + len(buf), batchLen*fullValueSize) + } + + for i := 0; i < batchLen; i++ { + elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil { + return fmt.Errorf("batch %d: %w", i, err) + } + buf = buf[fullValueSize:] + } return nil } diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go index 6d46a0422b9..611e087a9c5 100644 --- a/vendor/github.com/cilium/ebpf/prog.go +++ b/vendor/github.com/cilium/ebpf/prog.go @@ -53,7 +53,7 @@ type ProgramOptions struct { // verifier output enabled. Upon error, the program load will be repeated // with LogLevelBranch and the given (or default) LogSize value. // - // Setting this to a non-zero value will unconditionally enable the verifier + // Unless LogDisabled is set, setting this to a non-zero value will enable the verifier // log, populating the [ebpf.Program.VerifierLog] field on successful loads // and including detailed verifier errors if the program is rejected. This // will always allocate an output buffer, but will result in only a single @@ -242,14 +242,26 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er insns := make(asm.Instructions, len(spec.Instructions)) copy(insns, spec.Instructions) - handle, fib, lib, err := btf.MarshalExtInfos(insns) - if err != nil && !errors.Is(err, btf.ErrNotSupported) { - return nil, fmt.Errorf("load ext_infos: %w", err) + var b btf.Builder + if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder, &b); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) } - if handle != nil { - defer handle.Close() - attr.ProgBtfFd = uint32(handle.FD()) + errExtInfos := haveProgramExtInfos() + if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) { + // There is at least one CO-RE relocation which relies on a stable local + // type ID. + // Return ErrNotSupported instead of E2BIG if there is no BTF support. + return nil, errExtInfos + } + + if errExtInfos == nil { + // Only add func and line info if the kernel supports it. This allows + // BPF compiled with modern toolchains to work on old kernels. 
+ fib, lib, err := btf.MarshalExtInfos(insns, &b) + if err != nil { + return nil, fmt.Errorf("marshal ext_infos: %w", err) + } attr.FuncInfoRecSize = btf.FuncInfoSize attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize @@ -260,8 +272,14 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er attr.LineInfo = sys.NewSlicePointer(lib) } - if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder); err != nil { - return nil, fmt.Errorf("apply CO-RE relocations: %w", err) + if !b.Empty() { + handle, err := btf.NewHandle(&b) + if err != nil { + return nil, fmt.Errorf("load BTF: %w", err) + } + defer handle.Close() + + attr.ProgBtfFd = uint32(handle.FD()) } kconfig, err := resolveKconfigReferences(insns) @@ -703,10 +721,6 @@ func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { Cpu: opts.CPU, } - if attr.Repeat == 0 { - attr.Repeat = 1 - } - retry: for { err := sys.ProgRun(&attr) @@ -715,7 +729,7 @@ retry: } if errors.Is(err, unix.EINTR) { - if attr.Repeat == 1 { + if attr.Repeat <= 1 { // Older kernels check whether enough repetitions have been // executed only after checking for pending signals. // diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh index 629a069dd14..e818cd24067 100644 --- a/vendor/github.com/cilium/ebpf/run-tests.sh +++ b/vendor/github.com/cilium/ebpf/run-tests.sh @@ -14,6 +14,8 @@ set -euo pipefail script="$(realpath "$0")" readonly script +source "$(dirname "$script")/testdata/sh/lib.sh" + quote_env() { for var in "$@"; do if [ -v "$var" ]; then @@ -66,12 +68,12 @@ if [[ "${1:-}" = "--exec-vm" ]]; then fi for ((i = 0; i < 3; i++)); do - if ! $sudo virtme-run --kimg "${input}/boot/vmlinuz" --memory 768M --pwd \ + if ! $sudo virtme-run --kimg "${input}/boot/vmlinuz" --cpus 2 --memory 768M --pwd \ --rwdir="${testdir}=${testdir}" \ --rodir=/run/input="${input}" \ --rwdir=/run/output="${output}" \ - --script-sh "$(quote_env "${preserved_env[@]}") \"$script\" --exec-test $cmd" \ - --kopt possible_cpus=2; then # need at least two CPUs for some tests + --script-sh "$(quote_env "${preserved_env[@]}") \"$script\" \ + --exec-test $cmd"; then exit 23 fi @@ -96,8 +98,8 @@ elif [[ "${1:-}" = "--exec-test" ]]; then mount -t bpf bpf /sys/fs/bpf mount -t tracefs tracefs /sys/kernel/debug/tracing - if [[ -d "/run/input/bpf" ]]; then - export KERNEL_SELFTESTS="/run/input/bpf" + if [[ -d "/run/input/usr/src/linux/tools/testing/selftests/bpf" ]]; then + export KERNEL_SELFTESTS="/run/input/usr/src/linux/tools/testing/selftests/bpf" fi if [[ -d "/run/input/lib/modules" ]]; then @@ -117,48 +119,21 @@ if [[ -z "${1:-}" ]]; then exit 1 fi -readonly input="$(mktemp -d)" -readonly tmp_dir="${TMPDIR:-/tmp}" - -fetch() { - echo Fetching "${1}" - pushd "${tmp_dir}" > /dev/null - curl --no-progress-meter -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${BRANCH:-master}/${1}" - local ret=$? - popd > /dev/null - return $ret -} - -machine="$(uname -m)" -readonly machine +input="$(mktemp -d)" +readonly input if [[ -f "${1}" ]]; then + # First argument is a local file. readonly kernel="${1}" - cp "${1}" "${input}/bzImage" + cp "${1}" "${input}/boot/vmlinuz" else -# LINUX_VERSION_CODE test compares this to discovered value. 
- export KERNEL_VERSION="${1}" - - if [ "${machine}" = "x86_64" ]; then - readonly kernel="linux-${1}-amd64.tgz" - readonly selftests="linux-${1}-amd64-selftests-bpf.tgz" - elif [ "${machine}" = "aarch64" ]; then - readonly kernel="linux-${1}-arm64.tgz" - readonly selftests="" - else - echo "Arch ${machine} is not supported" - exit 1 - fi + readonly kernel="${1}" - fetch "${kernel}" - tar xf "${tmp_dir}/${kernel}" -C "${input}" + # LINUX_VERSION_CODE test compares this to discovered value. + export KERNEL_VERSION="${1}" - if [ -n "${selftests}" ] && fetch "${selftests}"; then - echo "Decompressing selftests" - mkdir "${input}/bpf" - tar --strip-components=5 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf" - else - echo "No selftests found, disabling" + if ! extract_oci_image "ghcr.io/cilium/ci-kernels:${kernel}-selftests" "${input}"; then + extract_oci_image "ghcr.io/cilium/ci-kernels:${kernel}" "${input}" fi fi shift diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go index cdf1fcf2ef5..4aef7faebc8 100644 --- a/vendor/github.com/cilium/ebpf/syscalls.go +++ b/vendor/github.com/cilium/ebpf/syscalls.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "fmt" + "math" "os" "runtime" @@ -302,3 +303,35 @@ var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func return evt.Close() }) + +var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", func() error { + insns := asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return err + } + bytecode := buf.Bytes() + + _, err := sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(SocketFilter), + License: sys.NewStringPointer("MIT"), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + FuncInfoCnt: 1, + ProgBtfFd: math.MaxUint32, + }) + + if errors.Is(err, unix.EBADF) { + return nil + } + + if errors.Is(err, unix.E2BIG) { + return ErrNotSupported + } + + return err +}) diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go index e9215519a2f..5146721c899 100644 --- a/vendor/github.com/cilium/ebpf/types.go +++ b/vendor/github.com/cilium/ebpf/types.go @@ -125,38 +125,39 @@ type ProgramType uint32 // eBPF program types const ( - UnspecifiedProgram ProgramType = iota - SocketFilter - Kprobe - SchedCLS - SchedACT - TracePoint - XDP - PerfEvent - CGroupSKB - CGroupSock - LWTIn - LWTOut - LWTXmit - SockOps - SkSKB - CGroupDevice - SkMsg - RawTracepoint - CGroupSockAddr - LWTSeg6Local - LircMode2 - SkReuseport - FlowDissector - CGroupSysctl - RawTracepointWritable - CGroupSockopt - Tracing - StructOps - Extension - LSM - SkLookup - Syscall + UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC) + SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER) + Kprobe = ProgramType(sys.BPF_PROG_TYPE_KPROBE) + SchedCLS = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS) + SchedACT = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT) + TracePoint = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT) + XDP = ProgramType(sys.BPF_PROG_TYPE_XDP) + PerfEvent = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT) + CGroupSKB = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB) + CGroupSock = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK) + LWTIn = ProgramType(sys.BPF_PROG_TYPE_LWT_IN) + LWTOut = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT) + LWTXmit = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT) + SockOps = 
ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS) + SkSKB = ProgramType(sys.BPF_PROG_TYPE_SK_SKB) + CGroupDevice = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE) + SkMsg = ProgramType(sys.BPF_PROG_TYPE_SK_MSG) + RawTracepoint = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT) + CGroupSockAddr = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR) + LWTSeg6Local = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL) + LircMode2 = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2) + SkReuseport = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT) + FlowDissector = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR) + CGroupSysctl = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL) + RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) + CGroupSockopt = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT) + Tracing = ProgramType(sys.BPF_PROG_TYPE_TRACING) + StructOps = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS) + Extension = ProgramType(sys.BPF_PROG_TYPE_EXT) + LSM = ProgramType(sys.BPF_PROG_TYPE_LSM) + SkLookup = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP) + Syscall = ProgramType(sys.BPF_PROG_TYPE_SYSCALL) + Netfilter = ProgramType(sys.BPF_PROG_TYPE_NETFILTER) ) // AttachType of the eBPF program, needed to differentiate allowed context accesses in @@ -170,49 +171,55 @@ type AttachType uint32 const AttachNone AttachType = 0 const ( - AttachCGroupInetIngress AttachType = iota - AttachCGroupInetEgress - AttachCGroupInetSockCreate - AttachCGroupSockOps - AttachSkSKBStreamParser - AttachSkSKBStreamVerdict - AttachCGroupDevice - AttachSkMsgVerdict - AttachCGroupInet4Bind - AttachCGroupInet6Bind - AttachCGroupInet4Connect - AttachCGroupInet6Connect - AttachCGroupInet4PostBind - AttachCGroupInet6PostBind - AttachCGroupUDP4Sendmsg - AttachCGroupUDP6Sendmsg - AttachLircMode2 - AttachFlowDissector - AttachCGroupSysctl - AttachCGroupUDP4Recvmsg - AttachCGroupUDP6Recvmsg - AttachCGroupGetsockopt - AttachCGroupSetsockopt - AttachTraceRawTp - AttachTraceFEntry - AttachTraceFExit - AttachModifyReturn - AttachLSMMac - AttachTraceIter - AttachCgroupInet4GetPeername - AttachCgroupInet6GetPeername - AttachCgroupInet4GetSockname - AttachCgroupInet6GetSockname - AttachXDPDevMap - AttachCgroupInetSockRelease - AttachXDPCPUMap - AttachSkLookup - AttachXDP - AttachSkSKBVerdict - AttachSkReuseportSelect - AttachSkReuseportSelectOrMigrate - AttachPerfEvent - AttachTraceKprobeMulti + AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS) + AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS) + AttachCGroupInetSockCreate = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE) + AttachCGroupSockOps = AttachType(sys.BPF_CGROUP_SOCK_OPS) + AttachSkSKBStreamParser = AttachType(sys.BPF_SK_SKB_STREAM_PARSER) + AttachSkSKBStreamVerdict = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT) + AttachCGroupDevice = AttachType(sys.BPF_CGROUP_DEVICE) + AttachSkMsgVerdict = AttachType(sys.BPF_SK_MSG_VERDICT) + AttachCGroupInet4Bind = AttachType(sys.BPF_CGROUP_INET4_BIND) + AttachCGroupInet6Bind = AttachType(sys.BPF_CGROUP_INET6_BIND) + AttachCGroupInet4Connect = AttachType(sys.BPF_CGROUP_INET4_CONNECT) + AttachCGroupInet6Connect = AttachType(sys.BPF_CGROUP_INET6_CONNECT) + AttachCGroupInet4PostBind = AttachType(sys.BPF_CGROUP_INET4_POST_BIND) + AttachCGroupInet6PostBind = AttachType(sys.BPF_CGROUP_INET6_POST_BIND) + AttachCGroupUDP4Sendmsg = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG) + AttachCGroupUDP6Sendmsg = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG) + AttachLircMode2 = AttachType(sys.BPF_LIRC_MODE2) + AttachFlowDissector = AttachType(sys.BPF_FLOW_DISSECTOR) + 
AttachCGroupSysctl = AttachType(sys.BPF_CGROUP_SYSCTL) + AttachCGroupUDP4Recvmsg = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG) + AttachCGroupUDP6Recvmsg = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG) + AttachCGroupGetsockopt = AttachType(sys.BPF_CGROUP_GETSOCKOPT) + AttachCGroupSetsockopt = AttachType(sys.BPF_CGROUP_SETSOCKOPT) + AttachTraceRawTp = AttachType(sys.BPF_TRACE_RAW_TP) + AttachTraceFEntry = AttachType(sys.BPF_TRACE_FENTRY) + AttachTraceFExit = AttachType(sys.BPF_TRACE_FEXIT) + AttachModifyReturn = AttachType(sys.BPF_MODIFY_RETURN) + AttachLSMMac = AttachType(sys.BPF_LSM_MAC) + AttachTraceIter = AttachType(sys.BPF_TRACE_ITER) + AttachCgroupInet4GetPeername = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME) + AttachCgroupInet6GetPeername = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME) + AttachCgroupInet4GetSockname = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME) + AttachCgroupInet6GetSockname = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME) + AttachXDPDevMap = AttachType(sys.BPF_XDP_DEVMAP) + AttachCgroupInetSockRelease = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE) + AttachXDPCPUMap = AttachType(sys.BPF_XDP_CPUMAP) + AttachSkLookup = AttachType(sys.BPF_SK_LOOKUP) + AttachXDP = AttachType(sys.BPF_XDP) + AttachSkSKBVerdict = AttachType(sys.BPF_SK_SKB_VERDICT) + AttachSkReuseportSelect = AttachType(sys.BPF_SK_REUSEPORT_SELECT) + AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) + AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT) + AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI) + AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP) + AttachStructOps = AttachType(sys.BPF_STRUCT_OPS) + AttachNetfilter = AttachType(sys.BPF_NETFILTER) + AttachTCXIngress = AttachType(sys.BPF_TCX_INGRESS) + AttachTCXEgress = AttachType(sys.BPF_TCX_EGRESS) + AttachTraceUprobeMulti = AttachType(sys.BPF_TRACE_UPROBE_MULTI) ) // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go index e20c37aa4e4..ee60b5be5b6 100644 --- a/vendor/github.com/cilium/ebpf/types_string.go +++ b/vendor/github.com/cilium/ebpf/types_string.go @@ -86,11 +86,12 @@ func _() { _ = x[LSM-29] _ = x[SkLookup-30] _ = x[Syscall-31] + _ = x[Netfilter-32] } -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscall" +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301} +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} func (i ProgramType) String() string { if i >= ProgramType(len(_ProgramType_index)-1) { diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go deleted file mode 100644 index 
ecc0dabb74d..00000000000 --- a/vendor/golang.org/x/exp/maps/maps.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package maps defines various functions useful with maps of any type. -package maps - -// Keys returns the keys of the map m. -// The keys will be in an indeterminate order. -func Keys[M ~map[K]V, K comparable, V any](m M) []K { - r := make([]K, 0, len(m)) - for k := range m { - r = append(r, k) - } - return r -} - -// Values returns the values of the map m. -// The values will be in an indeterminate order. -func Values[M ~map[K]V, K comparable, V any](m M) []V { - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r -} - -// Equal reports whether two maps contain the same key/value pairs. -// Values are compared using ==. -func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || v1 != v2 { - return false - } - } - return true -} - -// EqualFunc is like Equal, but compares values using eq. -// Keys are still compared with ==. -func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || !eq(v1, v2) { - return false - } - } - return true -} - -// Clear removes all entries from m, leaving it empty. -func Clear[M ~map[K]V, K comparable, V any](m M) { - for k := range m { - delete(m, k) - } -} - -// Clone returns a copy of m. This is a shallow clone: -// the new keys and values are set using ordinary assignment. -func Clone[M ~map[K]V, K comparable, V any](m M) M { - // Preserve nil in case it matters. - if m == nil { - return nil - } - r := make(M, len(m)) - for k, v := range m { - r[k] = v - } - return r -} - -// Copy copies all key/value pairs in src adding them to dst. -// When a key in src is already present in dst, -// the value in dst will be overwritten by the value associated -// with the key in src. -func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { - for k, v := range src { - dst[k] = v - } -} - -// DeleteFunc deletes any key/value pairs from m for which del returns true. -func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { - for k, v := range m { - if del(k, v) { - delete(m, k) - } - } -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go deleted file mode 100644 index cff0cd49ecf..00000000000 --- a/vendor/golang.org/x/exp/slices/slices.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. 
-package slices - -import "golang.org/x/exp/constraints" - -// Equal reports whether two slices are equal: the same length and all -// elements equal. If the lengths are different, Equal returns false. -// Otherwise, the elements are compared in increasing index order, and the -// comparison stops at the first unequal pair. -// Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true -} - -// EqualFunc reports whether two slices are equal using a comparison -// function on each pair of elements. If the lengths are different, -// EqualFunc returns false. Otherwise, the elements are compared in -// increasing index order, and the comparison stops at the first index -// for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true -} - -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, -// until one element is not equal to the other. -// The result of comparing the first non-matching elements is returned. -// If both slices are equal until one of them ends, the shorter slice is -// considered less than the longer one. -// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) - for i, v1 := range s1 { - if i >= s2len { - return +1 - } - v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 - } - } - if len(s1) < s2len { - return -1 - } - return 0 -} - -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. -// The result is the first non-zero result of cmp; if cmp always -// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), -// and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) - for i, v1 := range s1 { - if i >= s2len { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < s2len { - return -1 - } - return 0 -} - -// Index returns the index of the first occurrence of v in s, -// or -1 if not present. -func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { - return i - } - } - return -1 -} - -// IndexFunc returns the first index i satisfying f(s[i]), -// or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { - return i - } - } - return -1 -} - -// Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { - return Index(s, v) >= 0 -} - -// ContainsFunc reports whether at least one -// element e of s satisfies f(e). -func ContainsFunc[E any](s []E, f func(E) bool) bool { - return IndexFunc(s, f) >= 0 -} - -// Insert inserts the values v... into s at index i, -// returning the modified slice. -// In the returned slice r, r[i] == v[0]. -// Insert panics if i is out of range. -// This function is O(len(s) + len(v)). 
-func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) - copy(s2[i:], v) - return s2 - } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 -} - -// Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to -// make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. -func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check - - return append(s[:i], s[j:]...) -} - -// Replace replaces the elements s[i:j] by the given v, and returns the -// modified slice. Replace panics if s[i:j] is not a valid slice of s. -func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { - _ = s[i:j] // verify that i:j is a valid subslice - tot := len(s[:i]) + len(v) + len(s[j:]) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[j:]) - copy(s2[i:], v) - return s2 - } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 -} - -// Clone returns a copy of the slice. -// The elements are copied using assignment, so this is a shallow clone. -func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Compact replaces consecutive runs of equal elements with a single copy. -// This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. -func Compact[S ~[]E, E comparable](s S) S { - if len(s) < 2 { - return s - } - i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v - i++ - last = v - } - } - return s[:i] -} - -// CompactFunc is like Compact but uses a comparison function. -func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) < 2 { - return s - } - i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v - i++ - last = v - } - } - return s[:i] -} - -// Grow increases the slice's capacity, if necessary, to guarantee space for -// another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. If n is negative or too large to -// allocate the memory, Grow panics. -func Grow[S ~[]E, E any](s S, n int) S { - if n < 0 { - panic("cannot be negative") - } - if n -= cap(s) - len(s); n > 0 { - // TODO(https://go.dev/issue/53888): Make using []E instead of S - // to workaround a compiler bug where the runtime.growslice optimization - // does not take effect. Revert when the compiler is fixed. - s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] - } - return s -} - -// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. 
-func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go deleted file mode 100644 index f14f40da712..00000000000 --- a/vendor/golang.org/x/exp/slices/sort.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import ( - "math/bits" - - "golang.org/x/exp/constraints" -) - -// Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) -} - -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. -// -// SortFunc requires that less is a strict weak ordering. -// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -func SortFunc[E any](x []E, less func(a, b E) bool) { - n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) -} - -// SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) -} - -// IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { - for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { - return false - } - } - return true -} - -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { - for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { - return false - } - } - return true -} - -// BinarySearch searches for target in a sorted slice and returns the position -// where target is found, or the position where target would appear in the -// sort order; it also returns a bool saying whether the target is really found -// in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { - // Inlining is faster than calling BinarySearchFunc with a lambda. - n := len(x) - // Define x[-1] < target and x[n] >= target. - // Invariant: x[i-1] < target, x[j] >= target. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if x[h] < target { - i = h + 1 // preserves x[i-1] < target - } else { - j = h // preserves x[j] >= target - } - } - // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && x[i] == target -} - -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { - n := len(x) - // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . - // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. 
- i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmp(x[h], target) < 0 { - i = h + 1 // preserves cmp(x[i - 1], target) < 0 - } else { - j = h // preserves cmp(x[j], target) >= 0 - } - } - // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. - return i, i < n && cmp(x[i], target) == 0 -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go deleted file mode 100644 index 2a632476c50..00000000000 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownLessFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. -func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && less(data[first+child], data[first+child+1]) { - child++ - } - if !less(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) - } -} - -// pdqsortLessFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) - return - } - - // Fall back to heapsort if too many bad choices were made. 
- if limit == 0 { - heapSortLessFunc(data, a, b, less) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) - limit-- - } - - pivot, hint := choosePivotLessFunc(data, a, b, less) - if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) - a = mid - continue - } - - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) - b = mid - } - } -} - -// partitionLessFunc does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && less(data[i], data[a]) { - i++ - } - for i <= j && !less(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && less(data[i], data[a]) { - i++ - } - for i <= j && !less(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !less(data[a], data[i]) { - i++ - } - for i <= j && less(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotLessFunc chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
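The pivot-selection comment above names three regimes, and the two larger ones are built from a median-of-three (the Tukey ninther is a median of three such medians). A standalone toy version for intuition; the vendored choosePivotLessFunc below works on indices instead and additionally counts swaps to derive the increasing/decreasing hints:

```go
package main

import "fmt"

// median3 returns the median of three values, the building block of the
// median-of-three and ninther pivot strategies described above (toy
// stand-in, not the vendored implementation).
func median3(a, b, c int) int {
	if a > b {
		a, b = b, a
	}
	if b > c {
		b = c
	}
	if a > b {
		b = a
	}
	return b
}

func main() {
	fmt.Println(median3(9, 1, 5)) // 5
	fmt.Println(median3(2, 3, 1)) // 2
}
```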
-func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) - } - // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) - return b -} - -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) -} - -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortLessFunc(data, a, b, less) - a = b - b += blockSize - } - insertionSortLessFunc(data, a, n, less) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) - } - blockSize *= 2 - } -} - -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. 
-// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateLessFunc(data, start, m, end, less) - } - if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) - } - if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) - } -} - -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeLessFunc(data, m-i, m, j, less) - i -= j - } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) - j -= i - } - } - // i == j - swapRangeLessFunc(data, m-i, m, i, less) -} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go deleted file mode 100644 index efaa1c8b714..00000000000 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (data[first+child] < data[first+child+1]) { - child++ - } - if !(data[first+root] < data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !(data[a-1] < data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && (data[i] < data[a]) { - i++ - } - for i <= j && !(data[j] < data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && (data[i] < data[a]) { - i++ - } - for i <= j && !(data[j] < data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !(data[a] < data[i]) { - i++ - } - for i <= j && (data[a] < data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsOrdered scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotOrdered chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. 
- i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. -func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. 
- i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if data[h] < data[a] { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2f4d71a3a86..d1a139ed0ef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2,8 +2,8 @@ ## explicit; go 1.16 github.com/checkpoint-restore/go-criu/v6 github.com/checkpoint-restore/go-criu/v6/rpc -# github.com/cilium/ebpf v0.12.3 -## explicit; go 1.20 +# github.com/cilium/ebpf v0.13.2 +## explicit; go 1.21.0 github.com/cilium/ebpf github.com/cilium/ebpf/asm github.com/cilium/ebpf/btf @@ -77,8 +77,6 @@ github.com/vishvananda/netns # golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 ## explicit; go 1.18 golang.org/x/exp/constraints -golang.org/x/exp/maps -golang.org/x/exp/slices # golang.org/x/net v0.22.0 ## explicit; go 1.18 golang.org/x/net/bpf
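The golang.org/x/exp/maps and golang.org/x/exp/slices removals above line up with cilium/ebpf moving to the standard library maps and slices packages, available since Go 1.21 (the modules.txt hunk now records go 1.21.0 for the module). A minimal sketch of the stdlib equivalents; note that the x/exp less-func sort variants became cmp-func variants returning an int:

```go
package main

import (
	"cmp"
	"fmt"
	"maps"
	"slices"
)

func main() {
	s := []int{3, 1, 2}
	slices.Sort(s)                        // replaces x/exp slices.Sort
	i, found := slices.BinarySearch(s, 2) // same position+found semantics
	fmt.Println(s, i, found)              // [1 2 3] 1 true

	// SortFunc now takes a comparison function returning an int.
	slices.SortFunc(s, func(a, b int) int { return cmp.Compare(b, a) })
	fmt.Println(s) // [3 2 1]

	m := map[string]int{"a": 1}
	c := maps.Clone(m)            // replaces x/exp maps.Clone
	fmt.Println(maps.Equal(m, c)) // true
}
```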