diff --git a/Makefile b/Makefile
index 349a846fe34556544bd9cb56a222ccb0ee62da12..f34d997e2457114f48a9ef4066498d514bb811ef 100644
--- a/Makefile
+++ b/Makefile
@@ -35,7 +35,7 @@ OCI_BIN_PATH := $(shell which docker 2>/dev/null || which podman)
 OCI_BIN ?= $(shell basename ${OCI_BIN_PATH})
 
 LOCAL_GENERATOR_IMAGE ?= ebpf-generator:latest
-CILIUM_EBPF_VERSION := v0.12.3
+CILIUM_EBPF_VERSION := v0.12.4-0.20240124115601-f95957d1669c
 GOLANGCI_LINT_VERSION = v1.54.2
 CLANG ?= clang
 CFLAGS := -O2 -g -Wall -Werror $(CFLAGS)
@@ -119,7 +119,6 @@ generate: prereqs ## Generate artifacts of the code repo (pkg/ebpf and pkg/proto
 	go generate ./pkg/...
 	@echo "### Generating gRPC and Protocol Buffers code"
 	protoc --go_out=pkg --go-grpc_out=pkg proto/flow.proto
-	@mv ./pkg/ebpf/bpf_bpfeb_s390.go ./pkg/ebpf/bpf_bpfeb_s390x.go
 
 .PHONY: docker-generate
 docker-generate: ## Create the container that generates the eBPF binaries
diff --git a/go.mod b/go.mod
index 354031122a4eafedc3bd2d2e365e70e3b5409f19..02fe23c47ffc7d92767ef8e46cd87c679d3d2c0e 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.20
 
 require (
 	github.com/caarlos0/env/v6 v6.10.1
-	github.com/cilium/ebpf v0.12.3
+	github.com/cilium/ebpf v0.12.4-0.20240124115601-f95957d1669c
 	github.com/fsnotify/fsnotify v1.7.0
 	github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424
 	github.com/google/gopacket v1.1.19
diff --git a/go.sum b/go.sum
index be202a8b19e0cf27686d8ce08ba354f4bde58528..a816da8d562c5288683482a40e56577051ec3806 100644
--- a/go.sum
+++ b/go.sum
@@ -127,8 +127,8 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4=
-github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM=
+github.com/cilium/ebpf v0.12.4-0.20240124115601-f95957d1669c h1:Z4RObSsN7c3kvhpqwgnIjJ9Fm9lNyteorA6D4Gw4ikI=
+github.com/cilium/ebpf v0.12.4-0.20240124115601-f95957d1669c/go.mod h1:9BszLnmZR7oucpa/kBbVVf1ts3BoUSpltcnNp1hQkVw=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
@@ -186,7 +186,6 @@ github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -284,6 +283,7 @@ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
 github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
 github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -744,7 +744,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
 github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
diff --git a/pkg/ebpf/bpf_bpfel_arm64.go b/pkg/ebpf/bpf_arm64_bpfel.go
similarity index 99%
rename from pkg/ebpf/bpf_bpfel_arm64.go
rename to pkg/ebpf/bpf_arm64_bpfel.go
index 4742b97d39775385540560fbdf007b3ba070b11a..bc204c62819ffb303f28c67b7db03df171e4135a 100644
--- a/pkg/ebpf/bpf_bpfel_arm64.go
+++ b/pkg/ebpf/bpf_arm64_bpfel.go
@@ -200,5 +200,5 @@ func _BpfClose(closers ...io.Closer) error {
 
 // Do not access this directly.
 //
-//go:embed bpf_bpfel_arm64.o
+//go:embed bpf_arm64_bpfel.o
 var _BpfBytes []byte
diff --git a/pkg/ebpf/bpf_bpfel_arm64.o b/pkg/ebpf/bpf_arm64_bpfel.o
similarity index 100%
rename from pkg/ebpf/bpf_bpfel_arm64.o
rename to pkg/ebpf/bpf_arm64_bpfel.o
diff --git a/pkg/ebpf/bpf_bpfel_powerpc.go b/pkg/ebpf/bpf_powerpc_bpfel.go
similarity index 99%
rename from pkg/ebpf/bpf_bpfel_powerpc.go
rename to pkg/ebpf/bpf_powerpc_bpfel.go
index ca202722aa6e8344e63363adf5903299cc78bcf6..e074d6d88d7e1734a9da0964199c2b0d2c065ccc 100644
--- a/pkg/ebpf/bpf_bpfel_powerpc.go
+++ b/pkg/ebpf/bpf_powerpc_bpfel.go
@@ -200,5 +200,5 @@ func _BpfClose(closers ...io.Closer) error {
 
 // Do not access this directly.
 //
-//go:embed bpf_bpfel_powerpc.o
+//go:embed bpf_powerpc_bpfel.o
 var _BpfBytes []byte
diff --git a/pkg/ebpf/bpf_bpfel_powerpc.o b/pkg/ebpf/bpf_powerpc_bpfel.o
similarity index 100%
rename from pkg/ebpf/bpf_bpfel_powerpc.o
rename to pkg/ebpf/bpf_powerpc_bpfel.o
diff --git a/pkg/ebpf/bpf_bpfeb_s390x.go b/pkg/ebpf/bpf_s390_bpfeb.go
similarity index 98%
rename from pkg/ebpf/bpf_bpfeb_s390x.go
rename to pkg/ebpf/bpf_s390_bpfeb.go
index 76a129d66f6bddbd322bbfd9453de615b9def262..f129c345ff0bbf4059f0e9dd652aa5c36da3d858 100644
--- a/pkg/ebpf/bpf_bpfeb_s390x.go
+++ b/pkg/ebpf/bpf_s390_bpfeb.go
@@ -1,5 +1,5 @@
 // Code generated by bpf2go; DO NOT EDIT.
-//go:build s390 || s390x
+//go:build s390x
 
 package ebpf
 
@@ -200,5 +200,5 @@ func _BpfClose(closers ...io.Closer) error {
 
 // Do not access this directly.
 //
-//go:embed bpf_bpfeb_s390.o
+//go:embed bpf_s390_bpfeb.o
 var _BpfBytes []byte
diff --git a/pkg/ebpf/bpf_bpfeb_s390.o b/pkg/ebpf/bpf_s390_bpfeb.o
similarity index 100%
rename from pkg/ebpf/bpf_bpfeb_s390.o
rename to pkg/ebpf/bpf_s390_bpfeb.o
diff --git a/pkg/ebpf/bpf_bpfel_x86.go b/pkg/ebpf/bpf_x86_bpfel.go
similarity index 99%
rename from pkg/ebpf/bpf_bpfel_x86.go
rename to pkg/ebpf/bpf_x86_bpfel.go
index 5b9e0826dbc0707b54b9e8872eed7475ff5bfbd4..5b39854730c7912d72928b06ccb5bdbd6eedb5ee 100644
--- a/pkg/ebpf/bpf_bpfel_x86.go
+++ b/pkg/ebpf/bpf_x86_bpfel.go
@@ -200,5 +200,5 @@ func _BpfClose(closers ...io.Closer) error {
 
 // Do not access this directly.
 //
-//go:embed bpf_bpfel_x86.o
+//go:embed bpf_x86_bpfel.o
 var _BpfBytes []byte
diff --git a/pkg/ebpf/bpf_bpfel_x86.o b/pkg/ebpf/bpf_x86_bpfel.o
similarity index 100%
rename from pkg/ebpf/bpf_bpfel_x86.o
rename to pkg/ebpf/bpf_x86_bpfel.o
diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
deleted file mode 100644
index 26f555eb7a76ef7fcb446cd77b1ec39c8e8fcef1..0000000000000000000000000000000000000000
--- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
+++ /dev/null
@@ -1,92 +0,0 @@
-Architecture of the library
-===
-
-```mermaid
-graph RL
-    Program --> ProgramSpec --> ELF
-    btf.Spec --> ELF
-    Map --> MapSpec --> ELF
-    Links --> Map & Program
-    ProgramSpec -.-> btf.Spec
-    MapSpec -.-> btf.Spec
-    subgraph Collection
-        Program & Map
-    end
-    subgraph CollectionSpec
-        ProgramSpec & MapSpec & btf.Spec
-    end
-```
-
-ELF
----
-
-BPF is usually produced by using Clang to compile a subset of C. Clang outputs
-an ELF file which contains program byte code (aka BPF), but also metadata for
-maps used by the program. The metadata follows the conventions set by libbpf
-shipped with the kernel. Certain ELF sections have special meaning
-and contain structures defined by libbpf. Newer versions of clang emit
-additional metadata in [BPF Type Format](#BTF).
-
-The library aims to be compatible with libbpf so that moving from a C toolchain
-to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
-is tested against the Linux selftests and avoids introducing custom behaviour
-if possible.
-
-The output of the ELF reader is a `CollectionSpec` which encodes
-all of the information contained in the ELF in a form that is easy to work with
-in Go. The returned `CollectionSpec` should be deterministic: reading the same ELF
-file on different systems must produce the same output.
-As a corollary, any changes that depend on the runtime environment like the
-current kernel version must happen when creating [Objects](#Objects).
-
-Specifications
----
-
-`CollectionSpec` is a very simple container for `ProgramSpec`, `MapSpec` and
-`btf.Spec`. Avoid adding functionality to it if possible.
-
-`ProgramSpec` and `MapSpec` are blueprints for in-kernel
-objects and contain everything necessary to execute the relevant `bpf(2)`
-syscalls. They refer to `btf.Spec` for type information such as `Map` key and
-value types.
-
-The [asm](asm/) package provides an assembler that can be used to generate
-`ProgramSpec` on the fly.
-
-Objects
----
-
-`Program` and `Map` are the result of loading specifications into the kernel.
-Features that depend on knowledge of the current system (e.g kernel version)
-are implemented at this point.
-
-Sometimes loading a spec will fail because the kernel is too old, or a feature is not
-enabled. There are multiple ways the library deals with that:
-
-* Fallback: older kernels don't allow naming programs and maps. The library
-  automatically detects support for names, and omits them during load if
-  necessary. This works since name is primarily a debug aid.
-
-* Sentinel error: sometimes it's possible to detect that a feature isn't available.
-  In that case the library will return an error wrapping `ErrNotSupported`.
-  This is also useful to skip tests that can't run on the current kernel.
-
-Once program and map objects are loaded they expose the kernel's low-level API,
-e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
-wrappers on top of the low-level API, like `MapIterator`. The low-level API is
-useful when our higher-level API doesn't support a particular use case.
-
-Links
----
-
-Programs can be attached to many different points in the kernel and newer BPF hooks
-tend to use bpf_link to do so. Older hooks unfortunately use a combination of
-syscalls, netlink messages, etc. Adding support for a new link type should not
-pull in large dependencies like netlink, so XDP programs or tracepoints are
-out of scope.
-
-Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds
-to BPF_LINK_TRACING. In general, these types should be unexported as long as they
-don't export methods outside of the Link interface. Each Go type may have multiple
-exported constructors. For example `AttachTracing` and `AttachLSM` create a
-tracing link, but are distinct functions since they may require different arguments.
diff --git a/vendor/github.com/cilium/ebpf/CODEOWNERS b/vendor/github.com/cilium/ebpf/CODEOWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..adf991a30f9cba2356ad96870bd92dcb0e44eb5b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CODEOWNERS
@@ -0,0 +1 @@
+* @cilium/ebpf-lib-maintainers
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
deleted file mode 100644
index bf57da9395399e55331e3e3a69f12c7e05608979..0000000000000000000000000000000000000000
--- a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# How to contribute
-
-Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
-the form of pull requests and issues reporting bugs or suggesting new features
-are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
-a better understanding for the high-level goals.
-
-## Adding a new feature
-
-1. [Join](https://ebpf.io/slack) the
-[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel to discuss your requirements and how the feature can be implemented. The most important part is figuring out how much new exported API is necessary. **The less new API is required the easier it will be to land the feature.**
-2. (*optional*) Create a draft PR if you want to discuss the implementation or have hit a problem. It's fine if this doesn't compile or contains debug statements.
-3. Create a PR that is ready to merge. This must pass CI and have tests.
-
-### API stability
-
-The library doesn't guarantee the stability of its API at the moment.
-
-1. If possible avoid breakage by introducing new API and deprecating the old one
-   at the same time. If an API was deprecated in v0.x it can be removed in v0.x+1.
-2. Breaking API in a way that causes compilation failures is acceptable but must
-   have good reasons.
-3. Changing the semantics of the API without causing compilation failures is
-   heavily discouraged.
-
-## Running the tests
-
-Many of the tests require privileges to set resource limits and load eBPF code.
-The easiest way to obtain these is to run the tests with `sudo`.
-
-To test the current package with your local kernel you can simply run:
-```
-go test -exec sudo  ./...
-```
-
-To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
-It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.
-
-Examples:
-
-```bash
-# Run all tests on a 5.4 kernel
-./run-tests.sh 5.4
-
-# Run a subset of tests:
-./run-tests.sh 5.4 ./link
-```
-
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
index 0fa8cdc521c63b1c1a35e00c87402bdfaa690d30..eb532b2fd148b7fd8237881d263cf5c69f1fa08f 100644
--- a/vendor/github.com/cilium/ebpf/Makefile
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -84,7 +84,8 @@ all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) gene
 	ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
 
 generate:
-	go generate ./...
+	go generate -run "internal/cmd/gentypes" ./...
+	go generate -skip "internal/cmd/gentypes" ./...
 
 testdata/loader-%-el.elf: testdata/loader.c
 	$* $(CFLAGS) -target bpfel -c $< -o $@
@@ -102,13 +103,8 @@ testdata/loader-%-eb.elf: testdata/loader.c
 	$(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@
 	$(STRIP) -g $@
 
-.PHONY: generate-btf
-generate-btf: KERNEL_VERSION?=6.1.29
-generate-btf:
-	$(eval TMP := $(shell mktemp -d))
-	curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-amd64.tgz" -o "$(TMP)/linux.tgz"
-	tar xvf "$(TMP)/linux.tgz" -C "$(TMP)" --strip-components=2 ./boot/vmlinuz ./lib/modules
-	/lib/modules/$(shell uname -r)/build/scripts/extract-vmlinux "$(TMP)/vmlinuz" > "$(TMP)/vmlinux"
-	$(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz"
-	find "$(TMP)/modules" -type f -name bpf_testmod.ko -exec $(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" {} /dev/null \;
-	$(RM) -r "$(TMP)"
+.PHONY: update-kernel-deps
+update-kernel-deps: export KERNEL_VERSION?=6.7
+update-kernel-deps:
+	./testdata/sh/update-kernel-deps.sh
+	$(MAKE) container-all
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
index 81235a69dd31dc7967330090c950f178a7c99cdb..b36f8c0bec2b4c3288eb509ad941d0b0c8c4e5a5 100644
--- a/vendor/github.com/cilium/ebpf/README.md
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -13,10 +13,9 @@ ecosystem.
 
 ## Getting Started
 
-A small collection of Go and eBPF programs that serve as examples for building
-your own tools can be found under [examples/](examples/).
+Please take a look at our [Getting Started] guide.
 
-[Contributions](CONTRIBUTING.md) are highly encouraged, as they highlight certain use cases of
+[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of
 eBPF and the library, and help shape the future of the project.
 
 ## Getting Help
@@ -62,17 +61,6 @@ This library includes the following packages:
 * Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is
   not tested against.
 
-## Regenerating Testdata
-
-Run `make` in the root of this repository to rebuild testdata in all
-subpackages. This requires Docker, as it relies on a standardized build
-environment to keep the build output stable.
-
-It is possible to regenerate data using Podman by overriding the `CONTAINER_*`
-variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`.
-
-The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
-
 ## License
 
 MIT
@@ -80,3 +68,5 @@ MIT
 ### eBPF Gopher
 
 The eBPF honeygopher is based on the Go gopher designed by Renee French.
+
+[Getting Started]: https://ebpf-go.dev/guides/getting-started/
diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go
index add2a3b5cc9b3491225ba5e7487c98ab3416b635..7e3caed328d8c92d79c0acaedcc88ac07bd04357 100644
--- a/vendor/github.com/cilium/ebpf/attachtype_string.go
+++ b/vendor/github.com/cilium/ebpf/attachtype_string.go
@@ -52,11 +52,17 @@ func _() {
 	_ = x[AttachSkReuseportSelectOrMigrate-40]
 	_ = x[AttachPerfEvent-41]
 	_ = x[AttachTraceKprobeMulti-42]
+	_ = x[AttachLSMCgroup-43]
+	_ = x[AttachStructOps-44]
+	_ = x[AttachNetfilter-45]
+	_ = x[AttachTCXIngress-46]
+	_ = x[AttachTCXEgress-47]
+	_ = x[AttachTraceUprobeMulti-48]
 }
 
-const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMulti"
+const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMulti"
 
-var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626}
+var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688}
 
 func (i AttachType) String() string {
 	if i >= AttachType(len(_AttachType_index)-1) {
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
index 80f64d78aeeec0af128192efe7de0d5c80bd654a..6a3e53d678dcf95f68308065160d3ac24197aaed 100644
--- a/vendor/github.com/cilium/ebpf/btf/btf.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -29,9 +29,8 @@ var (
 // ID represents the unique ID of a BTF object.
 type ID = sys.BTFID
 
-// Spec allows querying a set of Types and loading the set into the
-// kernel.
-type Spec struct {
+// immutableTypes is a set of types which musn't be changed.
+type immutableTypes struct {
 	// All types contained by the spec, not including types from the base in
 	// case the spec was parsed from split BTF.
 	types []Type
@@ -44,13 +43,132 @@ type Spec struct {
 
 	// Types indexed by essential name.
 	// Includes all struct flavors and types with the same name.
-	namedTypes map[essentialName][]Type
+	namedTypes map[essentialName][]TypeID
+
+	// Byte order of the types. This affects things like struct member order
+	// when using bitfields.
+	byteOrder binary.ByteOrder
+}
+
+func (s *immutableTypes) typeByID(id TypeID) (Type, bool) {
+	if id < s.firstTypeID {
+		return nil, false
+	}
+
+	index := int(id - s.firstTypeID)
+	if index >= len(s.types) {
+		return nil, false
+	}
+
+	return s.types[index], true
+}
+
+// mutableTypes is a set of types which may be changed.
+type mutableTypes struct {
+	imm           immutableTypes
+	copies        map[Type]Type   // map[orig]copy
+	copiedTypeIDs map[Type]TypeID //map[copy]origID
+}
+
+// add a type to the set of mutable types.
+//
+// Copies type and all of its children once. Repeated calls with the same type
+// do not copy again.
+func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type {
+	return modifyGraphPreorder(typ, func(t Type) (Type, bool) {
+		cpy, ok := mt.copies[t]
+		if ok {
+			// This has been copied previously, no need to continue.
+			return cpy, false
+		}
+
+		cpy = t.copy()
+		mt.copies[t] = cpy
+
+		if id, ok := typeIDs[t]; ok {
+			mt.copiedTypeIDs[cpy] = id
+		}
+
+		// This is a new copy, keep copying children.
+		return cpy, true
+	})
+}
+
+// copy a set of mutable types.
+func (mt *mutableTypes) copy() mutableTypes {
+	mtCopy := mutableTypes{
+		mt.imm,
+		make(map[Type]Type, len(mt.copies)),
+		make(map[Type]TypeID, len(mt.copiedTypeIDs)),
+	}
+
+	copies := make(map[Type]Type, len(mt.copies))
+	for orig, copy := range mt.copies {
+		// NB: We make a copy of copy, not orig, so that changes to mutable types
+		// are preserved.
+		copyOfCopy := mtCopy.add(copy, mt.copiedTypeIDs)
+		copies[orig] = copyOfCopy
+	}
+
+	// mtCopy.copies is currently map[copy]copyOfCopy, replace it with
+	// map[orig]copyOfCopy.
+	mtCopy.copies = copies
+	return mtCopy
+}
+
+func (mt *mutableTypes) typeID(typ Type) (TypeID, error) {
+	if _, ok := typ.(*Void); ok {
+		// Equality is weird for void, since it is a zero sized type.
+		return 0, nil
+	}
+
+	id, ok := mt.copiedTypeIDs[typ]
+	if !ok {
+		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+	}
+
+	return id, nil
+}
+
+func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) {
+	immT, ok := mt.imm.typeByID(id)
+	if !ok {
+		return nil, false
+	}
+
+	return mt.add(immT, mt.imm.typeIDs), true
+}
+
+func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) {
+	immTypes := mt.imm.namedTypes[newEssentialName(name)]
+	if len(immTypes) == 0 {
+		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+	}
+
+	// Return a copy to prevent changes to namedTypes.
+	result := make([]Type, 0, len(immTypes))
+	for _, id := range immTypes {
+		immT, ok := mt.imm.typeByID(id)
+		if !ok {
+			return nil, fmt.Errorf("no type with ID %d", id)
+		}
+
+		// Match against the full name, not just the essential one
+		// in case the type being looked up is a struct flavor.
+		if immT.TypeName() == name {
+			result = append(result, mt.add(immT, mt.imm.typeIDs))
+		}
+	}
+	return result, nil
+}
+
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
+type Spec struct {
+	mutableTypes
 
 	// String table from ELF.
 	strings *stringTable
-
-	// Byte order of the ELF we decoded the spec from, may be nil.
-	byteOrder binary.ByteOrder
 }
 
 // LoadSpec opens file and calls LoadSpecFromReader on it.
@@ -181,7 +299,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
 		return nil, err
 	}
 
-	err = fixupDatasec(spec.types, sectionSizes, offsets)
+	err = fixupDatasec(spec.imm.types, sectionSizes, offsets)
 	if err != nil {
 		return nil, err
 	}
@@ -197,7 +315,7 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error
 	)
 
 	if base != nil {
-		if base.firstTypeID != 0 {
+		if base.imm.firstTypeID != 0 {
 			return nil, fmt.Errorf("can't use split BTF as base")
 		}
 
@@ -217,16 +335,22 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error
 	typeIDs, typesByName := indexTypes(types, firstTypeID)
 
 	return &Spec{
-		namedTypes:  typesByName,
-		typeIDs:     typeIDs,
-		types:       types,
-		firstTypeID: firstTypeID,
-		strings:     rawStrings,
-		byteOrder:   bo,
+		mutableTypes{
+			immutableTypes{
+				types,
+				typeIDs,
+				firstTypeID,
+				typesByName,
+				bo,
+			},
+			make(map[Type]Type),
+			make(map[Type]TypeID),
+		},
+		rawStrings,
 	}, nil
 }
 
-func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) {
 	namedTypes := 0
 	for _, typ := range types {
 		if typ.TypeName() != "" {
@@ -238,13 +362,15 @@ func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentia
 	}
 
 	typeIDs := make(map[Type]TypeID, len(types))
-	typesByName := make(map[essentialName][]Type, namedTypes)
+	typesByName := make(map[essentialName][]TypeID, namedTypes)
 
 	for i, typ := range types {
+		id := firstTypeID + TypeID(i)
+		typeIDs[typ] = id
+
 		if name := newEssentialName(typ.TypeName()); name != "" {
-			typesByName[name] = append(typesByName[name], typ)
+			typesByName[name] = append(typesByName[name], id)
 		}
-		typeIDs[typ] = firstTypeID + TypeID(i)
 	}
 
 	return typeIDs, typesByName
@@ -492,17 +618,9 @@ func fixupDatasecLayout(ds *Datasec) error {
 
 // Copy creates a copy of Spec.
 func (s *Spec) Copy() *Spec {
-	types := copyTypes(s.types, nil)
-	typeIDs, typesByName := indexTypes(types, s.firstTypeID)
-
-	// NB: Other parts of spec are not copied since they are immutable.
 	return &Spec{
-		types,
-		typeIDs,
-		s.firstTypeID,
-		typesByName,
+		s.mutableTypes.copy(),
 		s.strings,
-		s.byteOrder,
 	}
 }
 
@@ -519,8 +637,8 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
 // nextTypeID returns the next unallocated type ID or an error if there are no
 // more type IDs.
 func (s *Spec) nextTypeID() (TypeID, error) {
-	id := s.firstTypeID + TypeID(len(s.types))
-	if id < s.firstTypeID {
+	id := s.imm.firstTypeID + TypeID(len(s.imm.types))
+	if id < s.imm.firstTypeID {
 		return 0, fmt.Errorf("no more type IDs")
 	}
 	return id, nil
@@ -531,33 +649,19 @@ func (s *Spec) nextTypeID() (TypeID, error) {
 // Returns an error wrapping ErrNotFound if a Type with the given ID
 // does not exist in the Spec.
 func (s *Spec) TypeByID(id TypeID) (Type, error) {
-	if id < s.firstTypeID {
-		return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
-	}
-
-	index := int(id - s.firstTypeID)
-	if index >= len(s.types) {
-		return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
+	typ, ok := s.typeByID(id)
+	if !ok {
+		return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound)
 	}
 
-	return s.types[index], nil
+	return typ, nil
 }
 
 // TypeID returns the ID for a given Type.
 //
 // Returns an error wrapping ErrNoFound if the type isn't part of the Spec.
 func (s *Spec) TypeID(typ Type) (TypeID, error) {
-	if _, ok := typ.(*Void); ok {
-		// Equality is weird for void, since it is a zero sized type.
-		return 0, nil
-	}
-
-	id, ok := s.typeIDs[typ]
-	if !ok {
-		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
-	}
-
-	return id, nil
+	return s.mutableTypes.typeID(typ)
 }
 
 // AnyTypesByName returns a list of BTF Types with the given name.
@@ -568,21 +672,7 @@ func (s *Spec) TypeID(typ Type) (TypeID, error) {
 //
 // Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
 func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
-	types := s.namedTypes[newEssentialName(name)]
-	if len(types) == 0 {
-		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
-	}
-
-	// Return a copy to prevent changes to namedTypes.
-	result := make([]Type, 0, len(types))
-	for _, t := range types {
-		// Match against the full name, not just the essential one
-		// in case the type being looked up is a struct flavor.
-		if t.TypeName() == name {
-			result = append(result, t)
-		}
-	}
-	return result, nil
+	return s.mutableTypes.anyTypesByName(name)
 }
 
 // AnyTypeByName returns a Type with the given name.
@@ -671,26 +761,27 @@ func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
 
 // TypesIterator iterates over types of a given spec.
 type TypesIterator struct {
-	types []Type
-	index int
+	spec *Spec
+	id   TypeID
+	done bool
 	// The last visited type in the spec.
 	Type Type
 }
 
 // Iterate returns the types iterator.
 func (s *Spec) Iterate() *TypesIterator {
-	// We share the backing array of types with the Spec. This is safe since
-	// we don't allow deletion or shuffling of types.
-	return &TypesIterator{types: s.types, index: 0}
+	return &TypesIterator{spec: s, id: s.imm.firstTypeID}
 }
 
 // Next returns true as long as there are any remaining types.
 func (iter *TypesIterator) Next() bool {
-	if len(iter.types) <= iter.index {
+	if iter.done {
 		return false
 	}
 
-	iter.Type = iter.types[iter.index]
-	iter.index++
-	return true
+	var ok bool
+	iter.Type, ok = iter.spec.typeByID(iter.id)
+	iter.id++
+	iter.done = !ok
+	return !iter.done
 }
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
index ded7d43d482dcc881a0be18ecbfa96877c6a507e..724fae4b8c2f87e8f00c21b26e40b0a34f64fb8f 100644
--- a/vendor/github.com/cilium/ebpf/btf/core.go
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -159,12 +159,17 @@ func (k coreKind) String() string {
 // CORERelocate calculates changes needed to adjust eBPF instructions for differences
 // in types.
 //
+// resolveLocalTypeID is called for each local type which requires a stable TypeID.
+// Calling the function with the same type multiple times must produce the same
+// result. It is the callers responsibility to ensure that the relocated instructions
+// are loaded with matching BTF.
+//
 // Returns a list of fixups which can be applied to instructions to make them
 // match the target type(s).
 //
 // Fixups are returned in the order of relos, e.g. fixup[i] is the solution
 // for relos[i].
-func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
+func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
 	if target == nil {
 		var err error
 		target, _, err = kernelSpec()
@@ -173,8 +178,8 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([
 		}
 	}
 
-	if bo != target.byteOrder {
-		return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
+	if bo != target.imm.byteOrder {
+		return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
 	}
 
 	type reloGroup struct {
@@ -194,14 +199,15 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([
 				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
 			}
 
+			id, err := resolveLocalTypeID(relo.typ)
+			if err != nil {
+				return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err)
+			}
+
 			result[i] = COREFixup{
-				kind:  relo.kind,
-				local: uint64(relo.id),
-				// NB: Using relo.id as the target here is incorrect, since
-				// it doesn't match the BTF we generate on the fly. This isn't
-				// too bad for now since there are no uses of the local type ID
-				// in the kernel, yet.
-				target: uint64(relo.id),
+				kind:   relo.kind,
+				local:  uint64(relo.id),
+				target: uint64(id),
 			}
 			continue
 		}
@@ -221,7 +227,7 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([
 			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
 		}
 
-		targets := target.namedTypes[newEssentialName(localTypeName)]
+		targets := target.imm.namedTypes[newEssentialName(localTypeName)]
 		fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
 		if err != nil {
 			return nil, fmt.Errorf("relocate %s: %w", localType, err)
@@ -245,13 +251,13 @@ var errIncompatibleTypes = errors.New("incompatible types")
 //
 // The best target is determined by scoring: the less poisoning we have to do
 // the better the target is.
-func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
+func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []TypeID, bo binary.ByteOrder) ([]COREFixup, error) {
 	bestScore := len(relos)
 	var bestFixups []COREFixup
-	for _, target := range targets {
-		targetID, err := targetSpec.TypeID(target)
+	for _, targetID := range targets {
+		target, err := targetSpec.TypeByID(targetID)
 		if err != nil {
-			return nil, fmt.Errorf("target type ID: %w", err)
+			return nil, fmt.Errorf("look up target: %w", err)
 		}
 
 		score := 0 // lower is better
@@ -903,7 +909,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
 		targetType = UnderlyingType(*t)
 
 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
-			return fmt.Errorf("type mismatch: %w", errIncompatibleTypes)
+			return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
 		}
 
 		switch lv := (localType).(type) {
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
index d85f45ae9d83ae25f5027969dd8516ff09069b4f..d5652bad512199f14f32124984e019811984389b 100644
--- a/vendor/github.com/cilium/ebpf/btf/ext_info.go
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -142,15 +142,7 @@ func AssignMetadataToInstructions(
 
 // MarshalExtInfos encodes function and line info embedded in insns into kernel
 // wire format.
-//
-// Returns ErrNotSupported if the kernel doesn't support BTF-associated programs.
-func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []byte, _ error) {
-	// Bail out early if the kernel doesn't support Func(Proto). If this is the
-	// case, func_info will also be unsupported.
-	if err := haveProgBTF(); err != nil {
-		return nil, nil, nil, err
-	}
-
+func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) {
 	iter := insns.Iterate()
 	for iter.Next() {
 		_, ok := iter.Ins.Source().(*Line)
@@ -160,10 +152,9 @@ func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []
 		}
 	}
 
-	return nil, nil, nil, nil
+	return nil, nil, nil
 
 marshal:
-	var b Builder
 	var fiBuf, liBuf bytes.Buffer
 	for {
 		if fn := FuncMetadata(iter.Ins); fn != nil {
@@ -171,8 +162,8 @@ marshal:
 				fn:     fn,
 				offset: iter.Offset,
 			}
-			if err := fi.marshal(&fiBuf, &b); err != nil {
-				return nil, nil, nil, fmt.Errorf("write func info: %w", err)
+			if err := fi.marshal(&fiBuf, b); err != nil {
+				return nil, nil, fmt.Errorf("write func info: %w", err)
 			}
 		}
 
@@ -181,8 +172,8 @@ marshal:
 				line:   line,
 				offset: iter.Offset,
 			}
-			if err := li.marshal(&liBuf, &b); err != nil {
-				return nil, nil, nil, fmt.Errorf("write line info: %w", err)
+			if err := li.marshal(&liBuf, b); err != nil {
+				return nil, nil, fmt.Errorf("write line info: %w", err)
 			}
 		}
 
@@ -191,8 +182,7 @@ marshal:
 		}
 	}
 
-	handle, err := NewHandle(&b)
-	return handle, fiBuf.Bytes(), liBuf.Bytes(), err
+	return fiBuf.Bytes(), liBuf.Bytes(), nil
 }
 
 // btfExtHeader is found at the start of the .BTF.ext section.
diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go
index 0d093c6656485a9ca7dec4c8e573f4b6d9e3b8c5..cdaf08a6c03db9a5d8f5077b0079f58beb642432 100644
--- a/vendor/github.com/cilium/ebpf/btf/marshal.go
+++ b/vendor/github.com/cilium/ebpf/btf/marshal.go
@@ -93,6 +93,11 @@ func NewBuilder(types []Type) (*Builder, error) {
 	return b, nil
 }
 
+// Empty returns true if [Add] has not been invoked on the builder.
+func (b *Builder) Empty() bool {
+	return len(b.types) == 0
+}
+
 // Add a Type and allocate a stable ID for it.
 //
 // Adding the identical Type multiple times is valid and will return the same ID.
diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go
index a3a9dec940a9fe0b68ca65f010c617f04ea45bc4..5a7387b06389e1cbae9efb6d18074b4d1a53afea 100644
--- a/vendor/github.com/cilium/ebpf/btf/traversal.go
+++ b/vendor/github.com/cilium/ebpf/btf/traversal.go
@@ -87,6 +87,43 @@ func (po *postorderIterator) Next() bool {
 	return po.Type != nil
 }
 
+// modifyGraphPreorder allows modifying every Type in a graph.
+//
+// fn is invoked in preorder for every unique Type in a graph. See [Type] for the definition
+// of equality. Every occurrence of node is substituted with its replacement.
+//
+// If cont is true, fn is invoked for every child of replacement. Otherwise
+// traversal stops.
+//
+// Returns the substitution of the root node.
+func modifyGraphPreorder(root Type, fn func(node Type) (replacement Type, cont bool)) Type {
+	sub, cont := fn(root)
+	replacements := map[Type]Type{root: sub}
+
+	// This is a preorder traversal.
+	var walk func(*Type)
+	walk = func(node *Type) {
+		sub, visited := replacements[*node]
+		if visited {
+			*node = sub
+			return
+		}
+
+		sub, cont := fn(*node)
+		replacements[*node] = sub
+		*node = sub
+
+		if cont {
+			walkType(*node, walk)
+		}
+	}
+
+	if cont {
+		walkType(sub, walk)
+	}
+	return sub
+}
+
 // walkType calls fn on each child of typ.
 func walkType(typ Type, fn func(*Type)) {
 	// Explicitly type switch on the most common types to allow the inliner to
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
index 8bd7fc77fc607e8dd9392493b84e73887b9c3403..8bac8018c6f56e80cd99c9bd91f5bab1d1b9a8d0 100644
--- a/vendor/github.com/cilium/ebpf/btf/types.go
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -678,52 +678,31 @@ type Transformer func(Type) Type
 // typ may form a cycle. If transform is not nil, it is called with the
 // to be copied type, and the returned value is copied instead.
 func Copy(typ Type, transform Transformer) Type {
-	copies := copier{copies: make(map[Type]Type)}
-	copies.copy(&typ, transform)
-	return typ
+	copies := make(copier)
+	return copies.copy(typ, transform)
 }
 
-// copy a slice of Types recursively.
-//
-// See Copy for the semantics.
-func copyTypes(types []Type, transform Transformer) []Type {
-	result := make([]Type, len(types))
-	copy(result, types)
-
-	copies := copier{copies: make(map[Type]Type, len(types))}
-	for i := range result {
-		copies.copy(&result[i], transform)
-	}
-
-	return result
-}
-
-type copier struct {
-	copies map[Type]Type
-	work   typeDeque
-}
+// A map of a type to its copy.
+type copier map[Type]Type
 
-func (c *copier) copy(typ *Type, transform Transformer) {
-	for t := typ; t != nil; t = c.work.Pop() {
-		// *t is the identity of the type.
-		if cpy := c.copies[*t]; cpy != nil {
-			*t = cpy
-			continue
+func (c copier) copy(typ Type, transform Transformer) Type {
+	return modifyGraphPreorder(typ, func(t Type) (Type, bool) {
+		cpy, ok := c[t]
+		if ok {
+			// This has been copied previously, no need to continue.
+			return cpy, false
 		}
 
-		var cpy Type
 		if transform != nil {
-			cpy = transform(*t).copy()
+			cpy = transform(t).copy()
 		} else {
-			cpy = (*t).copy()
+			cpy = t.copy()
 		}
+		c[t] = cpy
 
-		c.copies[*t] = cpy
-		*t = cpy
-
-		// Mark any nested types for copying.
-		walkType(cpy, c.work.Push)
-	}
+		// This is a new copy, keep copying children.
+		return cpy, true
+	})
 }
 
 type typeDeque = internal.Deque[*Type]
diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/cpu.go
similarity index 71%
rename from vendor/github.com/cilium/ebpf/internal/cpu.go
rename to vendor/github.com/cilium/ebpf/cpu.go
index 9e908b610b5fd729c3fbe74382e674a9d510d44a..1397d6de985cc2ee857f10e4af7cae16f5a09945 100644
--- a/vendor/github.com/cilium/ebpf/internal/cpu.go
+++ b/vendor/github.com/cilium/ebpf/cpu.go
@@ -1,17 +1,33 @@
-package internal
+package ebpf
 
 import (
 	"fmt"
 	"os"
 	"strings"
+
+	"github.com/cilium/ebpf/internal"
 )
 
-// PossibleCPUs returns the max number of CPUs a system may possibly have
-// Logical CPU numbers must be of the form 0-n
-var PossibleCPUs = Memoize(func() (int, error) {
+var possibleCPU = internal.Memoize(func() (int, error) {
 	return parseCPUsFromFile("/sys/devices/system/cpu/possible")
 })
 
+// PossibleCPU returns the max number of CPUs a system may possibly have
+// Logical CPU numbers must be of the form 0-n
+func PossibleCPU() (int, error) {
+	return possibleCPU()
+}
+
+// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if
+// the error is non-nil.
+func MustPossibleCPU() int {
+	cpus, err := PossibleCPU()
+	if err != nil {
+		panic(err)
+	}
+	return cpus
+}
+
 func parseCPUsFromFile(path string) (int, error) {
 	spec, err := os.ReadFile(path)
 	if err != nil {
diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go
index 5a85cbc24ff86210c9e8fb272d5f8a08a03a3670..e409e97f9fc0041f61b8b92ead103d2513255c71 100644
--- a/vendor/github.com/cilium/ebpf/elf_reader.go
+++ b/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -15,6 +15,7 @@ import (
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/btf"
 	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
 	"github.com/cilium/ebpf/internal/unix"
 )
 
@@ -456,6 +457,8 @@ func jumpTarget(offset uint64, ins asm.Instruction) uint64 {
 	return uint64(dest)
 }
 
+var errUnsupportedBinding = errors.New("unsupported binding")
+
 func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
 	var (
 		typ  = elf.ST_TYPE(rel.Info)
@@ -472,7 +475,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		}
 
 		if bind != elf.STB_GLOBAL {
-			return fmt.Errorf("map %q: unsupported relocation %s", name, bind)
+			return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind)
 		}
 
 		if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
@@ -488,7 +491,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		switch typ {
 		case elf.STT_SECTION:
 			if bind != elf.STB_LOCAL {
-				return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind)
+				return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}
 
 			// This is really a reference to a static symbol, which clang doesn't
@@ -499,7 +502,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		case elf.STT_OBJECT:
 			// LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants.
 			if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL {
-				return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind)
+				return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}
 
 			offset = uint32(rel.Value)
@@ -507,7 +510,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		case elf.STT_NOTYPE:
 			// LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants.
 			if bind != elf.STB_LOCAL {
-				return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind)
+				return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
 			}
 
 			offset = uint32(rel.Value)
@@ -535,12 +538,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 			switch typ {
 			case elf.STT_NOTYPE, elf.STT_FUNC:
 				if bind != elf.STB_GLOBAL {
-					return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+					return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
 				}
 
 			case elf.STT_SECTION:
 				if bind != elf.STB_LOCAL {
-					return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+					return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
 				}
 
 				// The function we want to call is in the indicated section,
@@ -563,12 +566,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 			switch typ {
 			case elf.STT_FUNC:
 				if bind != elf.STB_GLOBAL {
-					return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+					return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
 				}
 
 			case elf.STT_SECTION:
 				if bind != elf.STB_LOCAL {
-					return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+					return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
 				}
 
 				// ins.Constant already contains the offset in bytes from the
@@ -598,7 +601,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 	// and extern kconfig variables declared using __kconfig.
 	case undefSection:
 		if bind != elf.STB_GLOBAL {
-			return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
+			return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind)
 		}
 
 		if typ != elf.STT_NOTYPE {
@@ -1181,109 +1184,106 @@ func (ec *elfCode) loadKsymsSection() error {
 	return nil
 }
 
+type libbpfElfSectionDef struct {
+	pattern     string
+	programType sys.ProgType
+	attachType  sys.AttachType
+	flags       libbpfElfSectionFlag
+}
+
+type libbpfElfSectionFlag uint32
+
+// The values correspond to enum sec_def_flags in libbpf.
+const (
+	_SEC_NONE libbpfElfSectionFlag = 0
+
+	_SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1)
+	_SEC_ATTACHABLE
+	_SEC_ATTACH_BTF
+	_SEC_SLEEPABLE
+	_SEC_XDP_FRAGS
+	_SEC_USDT
+
+	// Ignore any present extra in order to preserve backwards compatibility
+	// with earlier versions of the library.
+	ignoreExtra
+
+	_SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT
+)
+
+func init() {
+	// Compatibility with older versions of the library.
+	// We prepend libbpf definitions since they contain a prefix match
+	// for "xdp".
+	elfSectionDefs = append([]libbpfElfSectionDef{
+		{"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra},
+		{"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
+		{"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0},
+		{"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
+		{"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0},
+		// This has been in the library since the beginning of time. Not sure
+		// where it came from.
+		{"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
+	}, elfSectionDefs...)
+}
+
 func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
-	types := []struct {
-		prefix     string
-		progType   ProgramType
-		attachType AttachType
-		progFlags  uint32
-	}{
-		// Please update the types from libbpf.c and follow the order of it.
-		// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
-		{"socket", SocketFilter, AttachNone, 0},
-		{"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0},
-		{"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0},
-		{"kprobe/", Kprobe, AttachNone, 0},
-		{"uprobe/", Kprobe, AttachNone, 0},
-		{"kretprobe/", Kprobe, AttachNone, 0},
-		{"uretprobe/", Kprobe, AttachNone, 0},
-		{"tc", SchedCLS, AttachNone, 0},
-		{"classifier", SchedCLS, AttachNone, 0},
-		{"action", SchedACT, AttachNone, 0},
-		{"tracepoint/", TracePoint, AttachNone, 0},
-		{"tp/", TracePoint, AttachNone, 0},
-		{"raw_tracepoint/", RawTracepoint, AttachNone, 0},
-		{"raw_tp/", RawTracepoint, AttachNone, 0},
-		{"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0},
-		{"raw_tp.w/", RawTracepointWritable, AttachNone, 0},
-		{"tp_btf/", Tracing, AttachTraceRawTp, 0},
-		{"fentry/", Tracing, AttachTraceFEntry, 0},
-		{"fmod_ret/", Tracing, AttachModifyReturn, 0},
-		{"fexit/", Tracing, AttachTraceFExit, 0},
-		{"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE},
-		{"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE},
-		{"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE},
-		{"freplace/", Extension, AttachNone, 0},
-		{"lsm/", LSM, AttachLSMMac, 0},
-		{"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE},
-		{"iter/", Tracing, AttachTraceIter, 0},
-		{"iter.s/", Tracing, AttachTraceIter, unix.BPF_F_SLEEPABLE},
-		{"syscall", Syscall, AttachNone, 0},
-		{"xdp.frags_devmap/", XDP, AttachXDPDevMap, unix.BPF_F_XDP_HAS_FRAGS},
-		{"xdp_devmap/", XDP, AttachXDPDevMap, 0},
-		{"xdp.frags_cpumap/", XDP, AttachXDPCPUMap, unix.BPF_F_XDP_HAS_FRAGS},
-		{"xdp_cpumap/", XDP, AttachXDPCPUMap, 0},
-		{"xdp.frags", XDP, AttachNone, unix.BPF_F_XDP_HAS_FRAGS},
-		{"xdp", XDP, AttachNone, 0},
-		{"perf_event", PerfEvent, AttachNone, 0},
-		{"lwt_in", LWTIn, AttachNone, 0},
-		{"lwt_out", LWTOut, AttachNone, 0},
-		{"lwt_xmit", LWTXmit, AttachNone, 0},
-		{"lwt_seg6local", LWTSeg6Local, AttachNone, 0},
-		{"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0},
-		{"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0},
-		{"cgroup/skb", CGroupSKB, AttachNone, 0},
-		{"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0},
-		{"cgroup/sock_release", CGroupSock, AttachCgroupInetSockRelease, 0},
-		{"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0},
-		{"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0},
-		{"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0},
-		{"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0},
-		{"sockops", SockOps, AttachCGroupSockOps, 0},
-		{"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0},
-		{"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0},
-		{"sk_skb", SkSKB, AttachNone, 0},
-		{"sk_msg", SkMsg, AttachSkMsgVerdict, 0},
-		{"lirc_mode2", LircMode2, AttachLircMode2, 0},
-		{"flow_dissector", FlowDissector, AttachFlowDissector, 0},
-		{"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0},
-		{"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0},
-		{"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0},
-		{"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0},
-		{"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0},
-		{"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0},
-		{"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0},
-		{"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0},
-		{"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0},
-		{"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0},
-		{"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0},
-		{"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0},
-		{"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0},
-		{"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0},
-		{"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0},
-		{"struct_ops+", StructOps, AttachNone, 0},
-		{"sk_lookup/", SkLookup, AttachSkLookup, 0},
-		{"seccomp", SocketFilter, AttachNone, 0},
-		{"kprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
-		{"kretprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
-		// Document all prefixes in docs/ebpf/concepts/elf-sections.md.
-	}
+	// Skip optional program marking for now.
+	sectionName = strings.TrimPrefix(sectionName, "?")
 
-	for _, t := range types {
-		if !strings.HasPrefix(sectionName, t.prefix) {
+	for _, t := range elfSectionDefs {
+		extra, ok := matchSectionName(sectionName, t.pattern)
+		if !ok {
 			continue
 		}
 
-		if !strings.HasSuffix(t.prefix, "/") {
-			return t.progType, t.attachType, t.progFlags, ""
+		programType := ProgramType(t.programType)
+		attachType := AttachType(t.attachType)
+
+		var flags uint32
+		if t.flags&_SEC_SLEEPABLE > 0 {
+			flags |= unix.BPF_F_SLEEPABLE
+		}
+		if t.flags&_SEC_XDP_FRAGS > 0 {
+			flags |= unix.BPF_F_XDP_HAS_FRAGS
+		}
+		if t.flags&_SEC_EXP_ATTACH_OPT > 0 {
+			if programType == XDP {
+				// The library doesn't yet have code to fallback to not specifying
+				// attach type. Only do this for XDP since we've enforced correct
+				// attach type for all other program types.
+				attachType = AttachNone
+			}
+		}
+		if t.flags&ignoreExtra > 0 {
+			extra = ""
 		}
 
-		return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):]
+		return programType, attachType, flags, extra
 	}
 
 	return UnspecifiedProgram, AttachNone, 0, ""
 }
 
+// matchSectionName checks a section name against a pattern.
+//
+// Its behaviour mirrors that of libbpf's sec_def_matches.
+func matchSectionName(sectionName, pattern string) (extra string, found bool) {
+	have, extra, found := strings.Cut(sectionName, "/")
+	want := strings.TrimRight(pattern, "+/")
+
+	if strings.HasSuffix(pattern, "/") {
+		// Section name must have a slash and extra may be empty.
+		return extra, have == want && found
+	} else if strings.HasSuffix(pattern, "+") {
+		// Section name may have a slash and extra may be empty.
+		return extra, have == want
+	}
+
+	// Section name must have a prefix. extra is ignored.
+	return "", strings.HasPrefix(sectionName, pattern)
+}
+
 func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
 	rels := make(map[uint64]elf.Symbol)
 
diff --git a/vendor/github.com/cilium/ebpf/elf_sections.go b/vendor/github.com/cilium/ebpf/elf_sections.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b58251d9ab42752400257b5224cd7d4e93508a1
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/elf_sections.go
@@ -0,0 +1,109 @@
+// Code generated by internal/cmd/gensections.awk; DO NOT EDIT.
+
+package ebpf
+
+// Code in this file is derived from libbpf, available under BSD-2-Clause.
+
+import "github.com/cilium/ebpf/internal/sys"
+
+var elfSectionDefs = []libbpfElfSectionDef{
+	{"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
+	{"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE},
+	{"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE},
+	{"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+	{"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+	{"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
+	{"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+	{"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+	{"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
+	{"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
+	{"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
+	{"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
+	{"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
+	{"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
+	{"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
+	{"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+	{"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
+	{"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT},
+	{"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE},
+	{"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
+	{"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
+	{"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
+	{"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
+	{"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
+	{"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
+	{"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE},
+	{"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE},
+	{"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE},
+	{"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
+	{"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
+	{"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
+	{"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
+	{"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
+	{"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
+	{"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF},
+	{"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF},
+	{"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF},
+	{"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF},
+	{"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+	{"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+	{"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+	{"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF},
+	{"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF},
+	{"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+	{"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF},
+	{"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF},
+	{"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
+	{"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE},
+	{"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
+	{"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE},
+	{"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
+	{"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE},
+	{"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS},
+	{"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT},
+	{"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE},
+	{"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE},
+	{"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE},
+	{"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE},
+	{"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE},
+	{"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT},
+	{"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT},
+	{"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT},
+	{"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE},
+	{"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT},
+	{"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT},
+	{"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT},
+	{"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT},
+	{"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT},
+	{"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE},
+	{"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE},
+	{"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE},
+	{"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT},
+	{"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE},
+	{"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE},
+	{"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE},
+	{"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE},
+	{"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE},
+	{"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE},
+	{"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE},
+	{"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE},
+	{"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE},
+	{"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE},
+	{"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE},
+	{"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE},
+	{"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE},
+	{"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE},
+	{"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE},
+	{"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE},
+	{"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE},
+	{"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE},
+	{"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE},
+	{"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE},
+	{"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE},
+	{"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE},
+	{"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT},
+	{"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE},
+	{"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE},
+	{"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE},
+	{"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE},
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
index c80744ae0e07a80422021932089f33766d739546..d9fe217222bfb0300f75538581507c8899bcfeb8 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
@@ -21,24 +21,28 @@ func _() {
 	_ = x[BPF_F_MMAPABLE-1024]
 	_ = x[BPF_F_PRESERVE_ELEMS-2048]
 	_ = x[BPF_F_INNER_MAP-4096]
+	_ = x[BPF_F_LINK-8192]
+	_ = x[BPF_F_PATH_FD-16384]
 }
 
-const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAP"
+const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAPBPF_F_LINKBPF_F_PATH_FD"
 
 var _MapFlags_map = map[MapFlags]string{
-	1:    _MapFlags_name[0:17],
-	2:    _MapFlags_name[17:36],
-	4:    _MapFlags_name[36:51],
-	8:    _MapFlags_name[51:63],
-	16:   _MapFlags_name[63:75],
-	32:   _MapFlags_name[75:95],
-	64:   _MapFlags_name[95:110],
-	128:  _MapFlags_name[110:127],
-	256:  _MapFlags_name[127:144],
-	512:  _MapFlags_name[144:155],
-	1024: _MapFlags_name[155:169],
-	2048: _MapFlags_name[169:189],
-	4096: _MapFlags_name[189:204],
+	1:     _MapFlags_name[0:17],
+	2:     _MapFlags_name[17:36],
+	4:     _MapFlags_name[36:51],
+	8:     _MapFlags_name[51:63],
+	16:    _MapFlags_name[63:75],
+	32:    _MapFlags_name[75:95],
+	64:    _MapFlags_name[95:110],
+	128:   _MapFlags_name[110:127],
+	256:   _MapFlags_name[127:144],
+	512:   _MapFlags_name[144:155],
+	1024:  _MapFlags_name[155:169],
+	2048:  _MapFlags_name[169:189],
+	4096:  _MapFlags_name[189:204],
+	8192:  _MapFlags_name[204:214],
+	16384: _MapFlags_name[214:227],
 }
 
 func (i MapFlags) String() string {
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
index 088e82eea2a77d79df39f5e0af22f6715973f7a2..b1d49b8704139fecca5e0460deaac73ee2d1d29e 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -139,6 +139,17 @@ const (
 	BPF_F_MMAPABLE
 	BPF_F_PRESERVE_ELEMS
 	BPF_F_INNER_MAP
+	BPF_F_LINK
+	BPF_F_PATH_FD
+)
+
+// Flags used by bpf_mprog.
+const (
+	BPF_F_REPLACE = 1 << (iota + 2)
+	BPF_F_BEFORE
+	BPF_F_AFTER
+	BPF_F_ID
+	BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK
 )
 
 // wrappedErrno wraps syscall.Errno to prevent direct comparisons with
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go
index 51698e06b47b62588ef22f44cb473b144a5d4d1a..137e2ac3651d6e0cc447b8aaf5f7ff03fc54a34a 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/types.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go
@@ -65,7 +65,14 @@ const (
 	BPF_TCX_INGRESS                    AttachType = 46
 	BPF_TCX_EGRESS                     AttachType = 47
 	BPF_TRACE_UPROBE_MULTI             AttachType = 48
-	__MAX_BPF_ATTACH_TYPE              AttachType = 49
+	BPF_CGROUP_UNIX_CONNECT            AttachType = 49
+	BPF_CGROUP_UNIX_SENDMSG            AttachType = 50
+	BPF_CGROUP_UNIX_RECVMSG            AttachType = 51
+	BPF_CGROUP_UNIX_GETPEERNAME        AttachType = 52
+	BPF_CGROUP_UNIX_GETSOCKNAME        AttachType = 53
+	BPF_NETKIT_PRIMARY                 AttachType = 54
+	BPF_NETKIT_PEER                    AttachType = 55
+	__MAX_BPF_ATTACH_TYPE              AttachType = 56
 )
 
 type Cmd uint32
@@ -351,46 +358,48 @@ const (
 	BPF_LINK_TYPE_NETFILTER      LinkType = 10
 	BPF_LINK_TYPE_TCX            LinkType = 11
 	BPF_LINK_TYPE_UPROBE_MULTI   LinkType = 12
-	MAX_BPF_LINK_TYPE            LinkType = 13
+	BPF_LINK_TYPE_NETKIT         LinkType = 13
+	MAX_BPF_LINK_TYPE            LinkType = 14
 )
 
 type MapType uint32
 
 const (
-	BPF_MAP_TYPE_UNSPEC                    MapType = 0
-	BPF_MAP_TYPE_HASH                      MapType = 1
-	BPF_MAP_TYPE_ARRAY                     MapType = 2
-	BPF_MAP_TYPE_PROG_ARRAY                MapType = 3
-	BPF_MAP_TYPE_PERF_EVENT_ARRAY          MapType = 4
-	BPF_MAP_TYPE_PERCPU_HASH               MapType = 5
-	BPF_MAP_TYPE_PERCPU_ARRAY              MapType = 6
-	BPF_MAP_TYPE_STACK_TRACE               MapType = 7
-	BPF_MAP_TYPE_CGROUP_ARRAY              MapType = 8
-	BPF_MAP_TYPE_LRU_HASH                  MapType = 9
-	BPF_MAP_TYPE_LRU_PERCPU_HASH           MapType = 10
-	BPF_MAP_TYPE_LPM_TRIE                  MapType = 11
-	BPF_MAP_TYPE_ARRAY_OF_MAPS             MapType = 12
-	BPF_MAP_TYPE_HASH_OF_MAPS              MapType = 13
-	BPF_MAP_TYPE_DEVMAP                    MapType = 14
-	BPF_MAP_TYPE_SOCKMAP                   MapType = 15
-	BPF_MAP_TYPE_CPUMAP                    MapType = 16
-	BPF_MAP_TYPE_XSKMAP                    MapType = 17
-	BPF_MAP_TYPE_SOCKHASH                  MapType = 18
-	BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19
-	BPF_MAP_TYPE_CGROUP_STORAGE            MapType = 19
-	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY       MapType = 20
-	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE     MapType = 21
-	BPF_MAP_TYPE_QUEUE                     MapType = 22
-	BPF_MAP_TYPE_STACK                     MapType = 23
-	BPF_MAP_TYPE_SK_STORAGE                MapType = 24
-	BPF_MAP_TYPE_DEVMAP_HASH               MapType = 25
-	BPF_MAP_TYPE_STRUCT_OPS                MapType = 26
-	BPF_MAP_TYPE_RINGBUF                   MapType = 27
-	BPF_MAP_TYPE_INODE_STORAGE             MapType = 28
-	BPF_MAP_TYPE_TASK_STORAGE              MapType = 29
-	BPF_MAP_TYPE_BLOOM_FILTER              MapType = 30
-	BPF_MAP_TYPE_USER_RINGBUF              MapType = 31
-	BPF_MAP_TYPE_CGRP_STORAGE              MapType = 32
+	BPF_MAP_TYPE_UNSPEC                           MapType = 0
+	BPF_MAP_TYPE_HASH                             MapType = 1
+	BPF_MAP_TYPE_ARRAY                            MapType = 2
+	BPF_MAP_TYPE_PROG_ARRAY                       MapType = 3
+	BPF_MAP_TYPE_PERF_EVENT_ARRAY                 MapType = 4
+	BPF_MAP_TYPE_PERCPU_HASH                      MapType = 5
+	BPF_MAP_TYPE_PERCPU_ARRAY                     MapType = 6
+	BPF_MAP_TYPE_STACK_TRACE                      MapType = 7
+	BPF_MAP_TYPE_CGROUP_ARRAY                     MapType = 8
+	BPF_MAP_TYPE_LRU_HASH                         MapType = 9
+	BPF_MAP_TYPE_LRU_PERCPU_HASH                  MapType = 10
+	BPF_MAP_TYPE_LPM_TRIE                         MapType = 11
+	BPF_MAP_TYPE_ARRAY_OF_MAPS                    MapType = 12
+	BPF_MAP_TYPE_HASH_OF_MAPS                     MapType = 13
+	BPF_MAP_TYPE_DEVMAP                           MapType = 14
+	BPF_MAP_TYPE_SOCKMAP                          MapType = 15
+	BPF_MAP_TYPE_CPUMAP                           MapType = 16
+	BPF_MAP_TYPE_XSKMAP                           MapType = 17
+	BPF_MAP_TYPE_SOCKHASH                         MapType = 18
+	BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED        MapType = 19
+	BPF_MAP_TYPE_CGROUP_STORAGE                   MapType = 19
+	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY              MapType = 20
+	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21
+	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE            MapType = 21
+	BPF_MAP_TYPE_QUEUE                            MapType = 22
+	BPF_MAP_TYPE_STACK                            MapType = 23
+	BPF_MAP_TYPE_SK_STORAGE                       MapType = 24
+	BPF_MAP_TYPE_DEVMAP_HASH                      MapType = 25
+	BPF_MAP_TYPE_STRUCT_OPS                       MapType = 26
+	BPF_MAP_TYPE_RINGBUF                          MapType = 27
+	BPF_MAP_TYPE_INODE_STORAGE                    MapType = 28
+	BPF_MAP_TYPE_TASK_STORAGE                     MapType = 29
+	BPF_MAP_TYPE_BLOOM_FILTER                     MapType = 30
+	BPF_MAP_TYPE_USER_RINGBUF                     MapType = 31
+	BPF_MAP_TYPE_CGRP_STORAGE                     MapType = 32
 )
 
 type ProgType uint32
@@ -462,6 +471,15 @@ const (
 	BPF_STATS_RUN_TIME StatsType = 0
 )
 
+type TcxActionBase int32
+
+const (
+	TCX_NEXT     TcxActionBase = -1
+	TCX_PASS     TcxActionBase = 0
+	TCX_DROP     TcxActionBase = 2
+	TCX_REDIRECT TcxActionBase = 7
+)
+
 type XdpAction uint32
 
 const (
@@ -498,7 +516,7 @@ type LinkInfo struct {
 	Id     LinkID
 	ProgId uint32
 	_      [4]byte
-	Extra  [32]uint8
+	Extra  [40]uint8
 }
 
 type MapInfo struct {
@@ -719,6 +737,25 @@ func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
 	return NewFD(int(fd))
 }
 
+type LinkCreateTcxAttr struct {
+	ProgFd           uint32
+	TargetIfindex    uint32
+	AttachType       AttachType
+	Flags            uint32
+	RelativeFdOrId   uint32
+	_                [4]byte
+	ExpectedRevision uint64
+	_                [32]byte
+}
+
+func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) {
+	fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	if err != nil {
+		return nil, err
+	}
+	return NewFD(int(fd))
+}
+
 type LinkCreateTracingAttr struct {
 	ProgFd      uint32
 	TargetFd    uint32
@@ -971,13 +1008,13 @@ func ObjPin(attr *ObjPinAttr) error {
 }
 
 type ProgAttachAttr struct {
-	TargetFd         uint32
-	AttachBpfFd      uint32
-	AttachType       uint32
-	AttachFlags      uint32
-	ReplaceBpfFd     uint32
-	RelativeFd       uint32
-	ExpectedRevision uint64
+	TargetFdOrIfindex uint32
+	AttachBpfFd       uint32
+	AttachType        uint32
+	AttachFlags       uint32
+	ReplaceBpfFd      uint32
+	RelativeFdOrId    uint32
+	ExpectedRevision  uint64
 }
 
 func ProgAttach(attr *ProgAttachAttr) error {
@@ -997,9 +1034,13 @@ func ProgBindMap(attr *ProgBindMapAttr) error {
 }
 
 type ProgDetachAttr struct {
-	TargetFd    uint32
-	AttachBpfFd uint32
-	AttachType  uint32
+	TargetFdOrIfindex uint32
+	AttachBpfFd       uint32
+	AttachType        uint32
+	AttachFlags       uint32
+	_                 [4]byte
+	RelativeFdOrId    uint32
+	ExpectedRevision  uint64
 }
 
 func ProgDetach(attr *ProgDetachAttr) error {
@@ -1065,17 +1106,17 @@ func ProgLoad(attr *ProgLoadAttr) (*FD, error) {
 }
 
 type ProgQueryAttr struct {
-	TargetFd        uint32
-	AttachType      AttachType
-	QueryFlags      uint32
-	AttachFlags     uint32
-	ProgIds         Pointer
-	ProgCount       uint32
-	_               [4]byte
-	ProgAttachFlags Pointer
-	LinkIds         Pointer
-	LinkAttachFlags Pointer
-	Revision        uint64
+	TargetFdOrIfindex uint32
+	AttachType        AttachType
+	QueryFlags        uint32
+	AttachFlags       uint32
+	ProgIds           Pointer
+	Count             uint32
+	_                 [4]byte
+	ProgAttachFlags   Pointer
+	LinkIds           Pointer
+	LinkAttachFlags   Pointer
+	Revision          uint64
 }
 
 func ProgQuery(attr *ProgQueryAttr) error {
@@ -1143,6 +1184,11 @@ type RawTracepointLinkInfo struct {
 	_         [4]byte
 }
 
+type TcxLinkInfo struct {
+	Ifindex    uint32
+	AttachType AttachType
+}
+
 type TracingLinkInfo struct {
 	AttachType  AttachType
 	TargetObjId uint32
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
index c6959d9cc973f0ec47320ba784dcc9719fc15e4f..d184ea196aebc1db4ad4644a86df96c41ba45105 100644
--- a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
@@ -32,6 +32,7 @@ func UnsafeBuffer(ptr unsafe.Pointer) Buffer {
 
 // SyscallOutput prepares a Buffer for a syscall to write into.
 //
+// size is the length of the desired buffer in bytes.
 // The buffer may point at the underlying memory of dst, in which case [Unmarshal]
 // becomes a no-op.
 //
@@ -53,6 +54,11 @@ func (b Buffer) CopyTo(dst []byte) int {
 	return copy(dst, b.unsafeBytes())
 }
 
+// AppendTo appends the buffer onto dst.
+func (b Buffer) AppendTo(dst []byte) []byte {
+	return append(dst, b.unsafeBytes()...)
+}
+
 // Pointer returns the location where a syscall should write.
 func (b Buffer) Pointer() sys.Pointer {
 	// NB: This deliberately ignores b.length to support zero-copy
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
index 51ed7d0597ee21a13f0ee46210eeb49271dd7ab2..bc63724018b9593b37e359a14bea1f6a604e67ab 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -25,6 +25,7 @@ const (
 	EACCES     = linux.EACCES
 	EILSEQ     = linux.EILSEQ
 	EOPNOTSUPP = linux.EOPNOTSUPP
+	ESTALE     = linux.ESTALE
 )
 
 const (
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
index 1760e9e796b8ca01f6130dd93158e2d44fc68758..3a0f79cd3c507ff27f8929b5635c5b2c68fc594a 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
@@ -27,6 +27,7 @@ const (
 	EACCES
 	EILSEQ
 	EOPNOTSUPP
+	ESTALE
 )
 
 // Constants are distinct to avoid breaking switch statements.
diff --git a/vendor/github.com/cilium/ebpf/link/anchor.go b/vendor/github.com/cilium/ebpf/link/anchor.go
new file mode 100644
index 0000000000000000000000000000000000000000..1a3b5f7681fc8b00f65791632c70be837b1b2288
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/anchor.go
@@ -0,0 +1,137 @@
+package link
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+const anchorFlags = sys.BPF_F_REPLACE |
+	sys.BPF_F_BEFORE |
+	sys.BPF_F_AFTER |
+	sys.BPF_F_ID |
+	sys.BPF_F_LINK_MPROG
+
+// Anchor is a reference to a link or program.
+//
+// It is used to describe where an attachment or detachment should take place
+// for link types which support multiple attachment.
+type Anchor interface {
+	// anchor returns an fd or ID and a set of flags.
+	//
+	// By default fdOrID is taken to reference a program, but BPF_F_LINK_MPROG
+	// changes this to refer to a link instead.
+	//
+	// BPF_F_BEFORE, BPF_F_AFTER, BPF_F_REPLACE modify where a link or program
+	// is attached. The default behaviour if none of these flags is specified
+	// matches BPF_F_AFTER.
+	anchor() (fdOrID, flags uint32, _ error)
+}
+
+type firstAnchor struct{}
+
+func (firstAnchor) anchor() (fdOrID, flags uint32, _ error) {
+	return 0, sys.BPF_F_BEFORE, nil
+}
+
+// Head is the position before all other programs or links.
+func Head() Anchor {
+	return firstAnchor{}
+}
+
+type lastAnchor struct{}
+
+func (lastAnchor) anchor() (fdOrID, flags uint32, _ error) {
+	return 0, sys.BPF_F_AFTER, nil
+}
+
+// Tail is the position after all other programs or links.
+func Tail() Anchor {
+	return lastAnchor{}
+}
+
+// BeforeLink is the position just in front of target.
+func BeforeLink(target Link) Anchor {
+	return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// AfterLink is the position just after target.
+func AfterLink(target Link) Anchor {
+	return anchor{target, sys.BPF_F_AFTER}
+}
+
+// BeforeLinkByID is the position just in front of target.
+func BeforeLinkByID(target ID) Anchor {
+	return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// AfterLinkByID is the position just after target.
+func AfterLinkByID(target ID) Anchor {
+	return anchor{target, sys.BPF_F_AFTER}
+}
+
+// BeforeProgram is the position just in front of target.
+func BeforeProgram(target *ebpf.Program) Anchor {
+	return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// AfterProgram is the position just after target.
+func AfterProgram(target *ebpf.Program) Anchor {
+	return anchor{target, sys.BPF_F_AFTER}
+}
+
+// ReplaceProgram replaces the target itself.
+func ReplaceProgram(target *ebpf.Program) Anchor {
+	return anchor{target, sys.BPF_F_REPLACE}
+}
+
+// BeforeProgramByID is the position just in front of target.
+func BeforeProgramByID(target ebpf.ProgramID) Anchor {
+	return anchor{target, sys.BPF_F_BEFORE}
+}
+
+// AfterProgramByID is the position just after target.
+func AfterProgramByID(target ebpf.ProgramID) Anchor {
+	return anchor{target, sys.BPF_F_AFTER}
+}
+
+// ReplaceProgramByID replaces the target itself.
+func ReplaceProgramByID(target ebpf.ProgramID) Anchor {
+	return anchor{target, sys.BPF_F_REPLACE}
+}
+
+type anchor struct {
+	target   any
+	position uint32
+}
+
+func (ap anchor) anchor() (fdOrID, flags uint32, _ error) {
+	var typeFlag uint32
+	switch target := ap.target.(type) {
+	case *ebpf.Program:
+		fd := target.FD()
+		if fd < 0 {
+			return 0, 0, sys.ErrClosedFd
+		}
+		fdOrID = uint32(fd)
+		typeFlag = 0
+	case ebpf.ProgramID:
+		fdOrID = uint32(target)
+		typeFlag = sys.BPF_F_ID
+	case interface{ FD() int }:
+		fd := target.FD()
+		if fd < 0 {
+			return 0, 0, sys.ErrClosedFd
+		}
+		fdOrID = uint32(fd)
+		typeFlag = sys.BPF_F_LINK_MPROG
+	case ID:
+		fdOrID = uint32(target)
+		typeFlag = sys.BPF_F_LINK_MPROG | sys.BPF_F_ID
+	default:
+		return 0, 0, fmt.Errorf("invalid target %T", ap.target)
+	}
+
+	return fdOrID, ap.position | typeFlag, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go
index 58e85fe9d47312d98e2b1fb3af1612b23bb2c313..79f3d2b7f4c975ef4e03011f4caeb4f6b4a47d2a 100644
--- a/vendor/github.com/cilium/ebpf/link/cgroup.go
+++ b/vendor/github.com/cilium/ebpf/link/cgroup.go
@@ -143,8 +143,7 @@ func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
 		// Atomically replacing multiple programs requires at least
 		// 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
 		// program in MULTI mode")
-		args.Flags |= uint32(flagReplace)
-		args.Replace = cg.current
+		args.Anchor = ReplaceProgram(cg.current)
 	}
 
 	if err := RawAttachProgram(args); err != nil {
diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go
index 36acd6ee4b95b4197d7fed387c0fcc6f22dfd673..2025b7d4349ac4c373a51554350e7adcc5676404 100644
--- a/vendor/github.com/cilium/ebpf/link/link.go
+++ b/vendor/github.com/cilium/ebpf/link/link.go
@@ -98,6 +98,8 @@ func wrapRawLink(raw *RawLink) (_ Link, err error) {
 		return &kprobeMultiLink{*raw}, nil
 	case PerfEventType:
 		return nil, fmt.Errorf("recovering perf event fd: %w", ErrNotSupported)
+	case TCXType:
+		return &tcxLink{*raw}, nil
 	default:
 		return raw, nil
 	}
@@ -132,6 +134,7 @@ type TracingInfo sys.TracingLinkInfo
 type CgroupInfo sys.CgroupLinkInfo
 type NetNsInfo sys.NetNsLinkInfo
 type XDPInfo sys.XDPLinkInfo
+type TCXInfo sys.TcxLinkInfo
 
 // Tracing returns tracing type-specific link info.
 //
@@ -157,7 +160,7 @@ func (r Info) NetNs() *NetNsInfo {
 	return e
 }
 
-// ExtraNetNs returns XDP type-specific link info.
+// XDP returns XDP type-specific link info.
 //
 // Returns nil if the type-specific link info isn't available.
 func (r Info) XDP() *XDPInfo {
@@ -165,6 +168,14 @@ func (r Info) XDP() *XDPInfo {
 	return e
 }
 
+// TCX returns TCX type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) TCX() *TCXInfo {
+	e, _ := r.extra.(*TCXInfo)
+	return e
+}
+
 // RawLink is the low-level API to bpf_link.
 //
 // You should consider using the higher level interfaces in this
@@ -315,6 +326,8 @@ func (l *RawLink) Info() (*Info, error) {
 	case RawTracepointType, IterType,
 		PerfEventType, KprobeMultiType:
 		// Extra metadata not supported.
+	case TCXType:
+		extra = &TCXInfo{}
 	default:
 		return nil, fmt.Errorf("unknown link info type: %d", info.Type)
 	}
diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go
index 053735a67737dcb41eff6840c2e721dbbf209cf4..d8a2a15f93792675c425f23d3c8fe543d8c81fcd 100644
--- a/vendor/github.com/cilium/ebpf/link/program.go
+++ b/vendor/github.com/cilium/ebpf/link/program.go
@@ -2,22 +2,27 @@ package link
 
 import (
 	"fmt"
+	"runtime"
 
 	"github.com/cilium/ebpf"
 	"github.com/cilium/ebpf/internal/sys"
 )
 
 type RawAttachProgramOptions struct {
-	// File descriptor to attach to. This differs for each attach type.
+	// Target to query. This is usually a file descriptor but may refer to
+	// something else based on the attach type.
 	Target int
 	// Program to attach.
 	Program *ebpf.Program
-	// Program to replace (cgroups).
-	Replace *ebpf.Program
-	// Attach must match the attach type of Program (and Replace).
+	// Attach must match the attach type of Program.
 	Attach ebpf.AttachType
-	// Flags control the attach behaviour. This differs for each attach type.
+	// Attach relative to an anchor. Optional.
+	Anchor Anchor
+	// Flags control the attach behaviour. Specify an Anchor instead of
+	// F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
 	Flags uint32
+	// Only attach if the internal revision matches the given value.
+	ExpectedRevision uint64
 }
 
 // RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
@@ -25,45 +30,72 @@ type RawAttachProgramOptions struct {
 // You should use one of the higher level abstractions available in this
 // package if possible.
 func RawAttachProgram(opts RawAttachProgramOptions) error {
-	var replaceFd uint32
-	if opts.Replace != nil {
-		replaceFd = uint32(opts.Replace.FD())
+	if opts.Flags&anchorFlags != 0 {
+		return fmt.Errorf("disallowed flags: use Anchor to specify attach target")
 	}
 
 	attr := sys.ProgAttachAttr{
-		TargetFd:     uint32(opts.Target),
-		AttachBpfFd:  uint32(opts.Program.FD()),
-		ReplaceBpfFd: replaceFd,
-		AttachType:   uint32(opts.Attach),
-		AttachFlags:  uint32(opts.Flags),
+		TargetFdOrIfindex: uint32(opts.Target),
+		AttachBpfFd:       uint32(opts.Program.FD()),
+		AttachType:        uint32(opts.Attach),
+		AttachFlags:       uint32(opts.Flags),
+		ExpectedRevision:  opts.ExpectedRevision,
+	}
+
+	if opts.Anchor != nil {
+		fdOrID, flags, err := opts.Anchor.anchor()
+		if err != nil {
+			return fmt.Errorf("attach program: %w", err)
+		}
+
+		if flags == sys.BPF_F_REPLACE {
+			// Ensure that replacing a program works on old kernels.
+			attr.ReplaceBpfFd = fdOrID
+		} else {
+			attr.RelativeFdOrId = fdOrID
+			attr.AttachFlags |= flags
+		}
 	}
 
 	if err := sys.ProgAttach(&attr); err != nil {
 		if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
 			return haveFeatErr
 		}
-		return fmt.Errorf("can't attach program: %w", err)
+		return fmt.Errorf("attach program: %w", err)
 	}
+	runtime.KeepAlive(opts.Program)
 
 	return nil
 }
 
-type RawDetachProgramOptions struct {
-	Target  int
-	Program *ebpf.Program
-	Attach  ebpf.AttachType
-}
+type RawDetachProgramOptions RawAttachProgramOptions
 
 // RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
 //
 // You should use one of the higher level abstractions available in this
 // package if possible.
 func RawDetachProgram(opts RawDetachProgramOptions) error {
+	if opts.Flags&anchorFlags != 0 {
+		return fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+	}
+
 	attr := sys.ProgDetachAttr{
-		TargetFd:    uint32(opts.Target),
-		AttachBpfFd: uint32(opts.Program.FD()),
-		AttachType:  uint32(opts.Attach),
+		TargetFdOrIfindex: uint32(opts.Target),
+		AttachBpfFd:       uint32(opts.Program.FD()),
+		AttachType:        uint32(opts.Attach),
+		ExpectedRevision:  opts.ExpectedRevision,
 	}
+
+	if opts.Anchor != nil {
+		fdOrID, flags, err := opts.Anchor.anchor()
+		if err != nil {
+			return fmt.Errorf("detach program: %w", err)
+		}
+
+		attr.RelativeFdOrId = fdOrID
+		attr.AttachFlags |= flags
+	}
+
 	if err := sys.ProgDetach(&attr); err != nil {
 		if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
 			return haveFeatErr
diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go
index c05656512d55963b868d5db0997a378387606e0b..fe534f8efadbc0aecf6f21ae993eab9a02d28bac 100644
--- a/vendor/github.com/cilium/ebpf/link/query.go
+++ b/vendor/github.com/cilium/ebpf/link/query.go
@@ -2,7 +2,6 @@ package link
 
 import (
 	"fmt"
-	"os"
 	"unsafe"
 
 	"github.com/cilium/ebpf"
@@ -11,53 +10,102 @@ import (
 
 // QueryOptions defines additional parameters when querying for programs.
 type QueryOptions struct {
-	// Path can be a path to a cgroup, netns or LIRC2 device
-	Path string
+	// Target to query. This is usually a file descriptor but may refer to
+	// something else based on the attach type.
+	Target int
 	// Attach specifies the AttachType of the programs queried for
 	Attach ebpf.AttachType
 	// QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE
 	QueryFlags uint32
 }
 
-// QueryPrograms retrieves ProgramIDs associated with the AttachType.
-//
-// Returns (nil, nil) if there are no programs attached to the queried kernel
-// resource. Calling QueryPrograms on a kernel missing PROG_QUERY will result in
-// ErrNotSupported.
-func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) {
-	if haveProgQuery() != nil {
-		return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported)
-	}
+// QueryResult describes which programs and links are active.
+type QueryResult struct {
+	// List of attached programs.
+	Programs []AttachedProgram
 
-	f, err := os.Open(opts.Path)
-	if err != nil {
-		return nil, fmt.Errorf("can't open file: %s", err)
-	}
-	defer f.Close()
+	// Incremented by one every time the set of attached programs changes.
+	// May be zero if not supported by the [ebpf.AttachType].
+	Revision uint64
+}
+
+// HaveLinkInfo returns true if the kernel supports querying link information
+// for a particular [ebpf.AttachType].
+func (qr *QueryResult) HaveLinkInfo() bool {
+	return qr.Revision > 0
+}
+
+type AttachedProgram struct {
+	ID     ebpf.ProgramID
+	linkID ID
+}
+
+// LinkID returns the ID associated with the program.
+//
+// Returns 0, false if the kernel doesn't support retrieving the ID or if the
+// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you
+// need to tell the two apart.
+func (ap *AttachedProgram) LinkID() (ID, bool) {
+	return ap.linkID, ap.linkID != 0
+}
 
+// QueryPrograms retrieves a list of programs for the given AttachType.
+//
+// Returns a slice of attached programs, which may be empty.
+// revision counts how many times the set of attached programs has changed and
+// may be zero if not supported by the [ebpf.AttachType].
+// Returns ErrNotSupported on a kernel without BPF_PROG_QUERY.
+func QueryPrograms(opts QueryOptions) (*QueryResult, error) {
 	// query the number of programs to allocate correct slice size
 	attr := sys.ProgQueryAttr{
-		TargetFd:   uint32(f.Fd()),
-		AttachType: sys.AttachType(opts.Attach),
-		QueryFlags: opts.QueryFlags,
+		TargetFdOrIfindex: uint32(opts.Target),
+		AttachType:        sys.AttachType(opts.Attach),
+		QueryFlags:        opts.QueryFlags,
 	}
-	if err := sys.ProgQuery(&attr); err != nil {
-		return nil, fmt.Errorf("can't query program count: %w", err)
+	err := sys.ProgQuery(&attr)
+	if err != nil {
+		if haveFeatErr := haveProgQuery(); haveFeatErr != nil {
+			return nil, fmt.Errorf("query programs: %w", haveFeatErr)
+		}
+		return nil, fmt.Errorf("query programs: %w", err)
 	}
+	if attr.Count == 0 {
+		return &QueryResult{Revision: attr.Revision}, nil
+	}
+
+	// The minimum bpf_mprog revision is 1, so we can use the field to detect
+	// whether the attach type supports link ids.
+	haveLinkIDs := attr.Revision != 0
 
-	// return nil if no progs are attached
-	if attr.ProgCount == 0 {
-		return nil, nil
+	count := attr.Count
+	progIds := make([]ebpf.ProgramID, count)
+	attr = sys.ProgQueryAttr{
+		TargetFdOrIfindex: uint32(opts.Target),
+		AttachType:        sys.AttachType(opts.Attach),
+		QueryFlags:        opts.QueryFlags,
+		Count:             count,
+		ProgIds:           sys.NewPointer(unsafe.Pointer(&progIds[0])),
+	}
+
+	var linkIds []ID
+	if haveLinkIDs {
+		linkIds = make([]ID, count)
+		attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0]))
 	}
 
-	// we have at least one prog, so we query again
-	progIds := make([]ebpf.ProgramID, attr.ProgCount)
-	attr.ProgIds = sys.NewPointer(unsafe.Pointer(&progIds[0]))
-	attr.ProgCount = uint32(len(progIds))
 	if err := sys.ProgQuery(&attr); err != nil {
-		return nil, fmt.Errorf("can't query program IDs: %w", err)
+		return nil, fmt.Errorf("query programs: %w", err)
 	}
 
-	return progIds, nil
+	// NB: attr.Count might have changed between the two syscalls.
+	var programs []AttachedProgram
+	for i, id := range progIds[:attr.Count] {
+		ap := AttachedProgram{ID: id}
+		if haveLinkIDs {
+			ap.linkID = linkIds[i]
+		}
+		programs = append(programs, ap)
+	}
 
+	return &QueryResult{programs, attr.Revision}, nil
 }
diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go
index 012970ec78ed9e044c48d6237b868aeed11bd86a..02460d9fbd6c8d6a2e80cd9a23178f0edfd81e58 100644
--- a/vendor/github.com/cilium/ebpf/link/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/link/syscalls.go
@@ -24,6 +24,7 @@ const (
 	XDPType           = sys.BPF_LINK_TYPE_XDP
 	PerfEventType     = sys.BPF_LINK_TYPE_PERF_EVENT
 	KprobeMultiType   = sys.BPF_LINK_TYPE_KPROBE_MULTI
+	TCXType           = sys.BPF_LINK_TYPE_TCX
 )
 
 var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
@@ -72,10 +73,10 @@ var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic repl
 	// present.
 	attr := sys.ProgAttachAttr{
 		// We rely on this being checked after attachFlags.
-		TargetFd:    ^uint32(0),
-		AttachBpfFd: uint32(prog.FD()),
-		AttachType:  uint32(ebpf.AttachCGroupInetIngress),
-		AttachFlags: uint32(flagReplace),
+		TargetFdOrIfindex: ^uint32(0),
+		AttachBpfFd:       uint32(prog.FD()),
+		AttachType:        uint32(ebpf.AttachCGroupInetIngress),
+		AttachFlags:       uint32(flagReplace),
 	}
 
 	err = sys.ProgAttach(&attr)
@@ -110,8 +111,8 @@ var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() err
 		// We rely on this being checked during the syscall.
 		// With an otherwise correct payload we expect EBADF here
 		// as an indication that the feature is present.
-		TargetFd:   ^uint32(0),
-		AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+		TargetFdOrIfindex: ^uint32(0),
+		AttachType:        sys.AttachType(ebpf.AttachCGroupInetIngress),
 	}
 
 	err := sys.ProgQuery(&attr)
@@ -124,3 +125,38 @@ var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() err
 	}
 	return errors.New("syscall succeeded unexpectedly")
 })
+
+var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error {
+	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+		Type:    ebpf.SchedCLS,
+		License: "MIT",
+		Instructions: asm.Instructions{
+			asm.Mov.Imm(asm.R0, 0),
+			asm.Return(),
+		},
+	})
+
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+
+	defer prog.Close()
+	attr := sys.LinkCreateTcxAttr{
+		// We rely on this being checked during the syscall.
+		// With an otherwise correct payload we expect ENODEV here
+		// as an indication that the feature is present.
+		TargetIfindex: ^uint32(0),
+		ProgFd:        uint32(prog.FD()),
+		AttachType:    sys.AttachType(ebpf.AttachTCXIngress),
+	}
+
+	_, err = sys.LinkCreateTcx(&attr)
+
+	if errors.Is(err, unix.ENODEV) {
+		return nil
+	}
+	if err != nil {
+		return ErrNotSupported
+	}
+	return errors.New("syscall succeeded unexpectedly")
+})
diff --git a/vendor/github.com/cilium/ebpf/link/tcx.go b/vendor/github.com/cilium/ebpf/link/tcx.go
new file mode 100644
index 0000000000000000000000000000000000000000..88f2237d29013ab416559100f80e3d47d264a0f7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tcx.go
@@ -0,0 +1,71 @@
+package link
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+type TCXOptions struct {
+	// Index of the interface to attach to.
+	Interface int
+	// Program to attach.
+	Program *ebpf.Program
+	// One of the AttachTCX* constants.
+	Attach ebpf.AttachType
+	// Attach relative to an anchor. Optional.
+	Anchor Anchor
+	// Only attach if the expected revision matches.
+	ExpectedRevision uint64
+	// Flags control the attach behaviour. Specify an Anchor instead of
+	// F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
+	Flags uint32
+}
+
+func AttachTCX(opts TCXOptions) (Link, error) {
+	if opts.Interface < 0 {
+		return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface)
+	}
+
+	if opts.Flags&anchorFlags != 0 {
+		return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+	}
+
+	attr := sys.LinkCreateTcxAttr{
+		ProgFd:           uint32(opts.Program.FD()),
+		AttachType:       sys.AttachType(opts.Attach),
+		TargetIfindex:    uint32(opts.Interface),
+		ExpectedRevision: opts.ExpectedRevision,
+		Flags:            opts.Flags,
+	}
+
+	if opts.Anchor != nil {
+		fdOrID, flags, err := opts.Anchor.anchor()
+		if err != nil {
+			return nil, fmt.Errorf("attach tcx link: %w", err)
+		}
+
+		attr.RelativeFdOrId = fdOrID
+		attr.Flags |= flags
+	}
+
+	fd, err := sys.LinkCreateTcx(&attr)
+	runtime.KeepAlive(opts.Program)
+	runtime.KeepAlive(opts.Anchor)
+	if err != nil {
+		if haveFeatErr := haveTCX(); haveFeatErr != nil {
+			return nil, haveFeatErr
+		}
+		return nil, fmt.Errorf("attach tcx link: %w", err)
+	}
+
+	return &tcxLink{RawLink{fd, ""}}, nil
+}
+
+type tcxLink struct {
+	RawLink
+}
+
+var _ Link = (*tcxLink)(nil)
diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go
index cf5b02ddf58e1c4f300401411d92e994c50a2fd4..ab21bea87e738d1f1000fd9a8c6a1659aed3ad9b 100644
--- a/vendor/github.com/cilium/ebpf/linker.go
+++ b/vendor/github.com/cilium/ebpf/linker.go
@@ -120,7 +120,7 @@ func hasFunctionReferences(insns asm.Instructions) bool {
 //
 // Passing a nil target will relocate against the running kernel. insns are
 // modified in place.
-func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOrder) error {
+func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOrder, b *btf.Builder) error {
 	var relos []*btf.CORERelocation
 	var reloInsns []*asm.Instruction
 	iter := insns.Iterate()
@@ -139,7 +139,7 @@ func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOr
 		bo = internal.NativeEndian
 	}
 
-	fixups, err := btf.CORERelocate(relos, target, bo)
+	fixups, err := btf.CORERelocate(relos, target, bo, b.Add)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go
index ce945ace0832ef6095bb1cf6e62909d10371f9c2..f350d8b11a476d227321737b00f9ad7f64beaef7 100644
--- a/vendor/github.com/cilium/ebpf/map.go
+++ b/vendor/github.com/cilium/ebpf/map.go
@@ -133,7 +133,7 @@ func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) {
 		spec.KeySize = 4
 		spec.ValueSize = 4
 
-		n, err := internal.PossibleCPUs()
+		n, err := PossibleCPU()
 		if err != nil {
 			return nil, fmt.Errorf("fixup perf event array: %w", err)
 		}
@@ -515,7 +515,7 @@ func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries
 		return m, nil
 	}
 
-	possibleCPUs, err := internal.PossibleCPUs()
+	possibleCPUs, err := PossibleCPU()
 	if err != nil {
 		return nil, err
 	}
@@ -642,11 +642,15 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
 }
 
 func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error {
+	slice, err := ensurePerCPUSlice(valueOut, int(m.valueSize))
+	if err != nil {
+		return err
+	}
 	valueBytes := make([]byte, m.fullValueSize)
 	if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
 		return err
 	}
-	return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+	return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes)
 }
 
 func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error {
@@ -669,11 +673,53 @@ func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags
 }
 
 func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error {
+	slice, err := ensurePerCPUSlice(valueOut, int(m.valueSize))
+	if err != nil {
+		return err
+	}
 	valueBytes := make([]byte, m.fullValueSize)
 	if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
 		return err
 	}
-	return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+	return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes)
+}
+
+// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary.
+func ensurePerCPUSlice(sliceOrPtr any, elemLength int) (any, error) {
+	sliceOrPtrType := reflect.TypeOf(sliceOrPtr)
+	if sliceOrPtrType.Kind() == reflect.Slice {
+		// The target is a slice, the caller is responsible for ensuring that
+		// size is correct.
+		return sliceOrPtr, nil
+	}
+
+	slicePtrType := sliceOrPtrType
+	if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
+		return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice")
+	}
+
+	possibleCPUs, err := PossibleCPU()
+	if err != nil {
+		return nil, err
+	}
+
+	sliceType := slicePtrType.Elem()
+	slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)
+
+	sliceElemType := sliceType.Elem()
+	sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
+	reflect.ValueOf(sliceOrPtr).Elem().Set(slice)
+	if !sliceElemIsPointer {
+		return slice.Interface(), nil
+	}
+	sliceElemType = sliceElemType.Elem()
+
+	for i := 0; i < possibleCPUs; i++ {
+		newElem := reflect.New(sliceElemType)
+		slice.Index(i).Set(newElem)
+	}
+
+	return slice.Interface(), nil
 }
 
 func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error {
@@ -917,14 +963,19 @@ func (m *Map) guessNonExistentKey() ([]byte, error) {
 //
 // "keysOut" and "valuesOut" must be of type slice, a pointer
 // to a slice or buffer will not work.
-// "prevKey" is the key to start the batch lookup from, it will
-// *not* be included in the results. Use nil to start at the first key.
+// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
+//
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details with regard to
+// different map type implementations.
 //
 // ErrKeyNotExist is returned when the batch lookup has reached
 // the end of all possible results, even when partial results
 // are returned. It should be used to evaluate when lookup is "done".
-func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
-	return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+func (m *Map) BatchLookup(cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts)
 }
 
 // BatchLookupAndDelete looks up many elements in a map at once,
@@ -932,47 +983,113 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o
 // It then deletes all those elements.
 // "keysOut" and "valuesOut" must be of type slice, a pointer
 // to a slice or buffer will not work.
-// "prevKey" is the key to start the batch lookup from, it will
-// *not* be included in the results. Use nil to start at the first key.
+// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
+//
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details with regard to
+// different map type implementations.
 //
 // ErrKeyNotExist is returned when the batch lookup has reached
 // the end of all possible results, even when partial results
 // are returned. It should be used to evaluate when lookup is "done".
-func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
-	return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+func (m *Map) BatchLookupAndDelete(cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts)
 }
 
-func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
-	if err := haveBatchAPI(); err != nil {
-		return 0, err
-	}
+// BatchCursor represents a starting point for a batch operation.
+type BatchCursor struct {
+	m      *Map
+	opaque []byte
+}
+
+func (m *Map) batchLookup(cmd sys.Cmd, cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
 	if m.typ.hasPerCPUValue() {
-		return 0, ErrNotSupported
+		return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts)
 	}
-	keysValue := reflect.ValueOf(keysOut)
-	if keysValue.Kind() != reflect.Slice {
-		return 0, fmt.Errorf("keys must be a slice")
+
+	count, err := batchCount(keysOut, valuesOut)
+	if err != nil {
+		return 0, err
 	}
-	valuesValue := reflect.ValueOf(valuesOut)
-	if valuesValue.Kind() != reflect.Slice {
-		return 0, fmt.Errorf("valuesOut must be a slice")
+
+	valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize))
+
+	n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts)
+	if err != nil {
+		return n, err
 	}
-	count := keysValue.Len()
-	if count != valuesValue.Len() {
-		return 0, fmt.Errorf("keysOut and valuesOut must be the same length")
+
+	err = valueBuf.Unmarshal(valuesOut)
+	if err != nil {
+		return 0, err
 	}
-	keyBuf := make([]byte, count*int(m.keySize))
-	keyPtr := sys.NewSlicePointer(keyBuf)
+
+	return n, nil
+}
+
+func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *BatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	count, err := sliceLen(keysOut)
+	if err != nil {
+		return 0, fmt.Errorf("keys: %w", err)
+	}
+
 	valueBuf := make([]byte, count*int(m.fullValueSize))
 	valuePtr := sys.NewSlicePointer(valueBuf)
-	nextBuf := makeMapSyscallOutput(nextKeyOut, int(m.keySize))
+
+	n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts)
+	if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+		return 0, sysErr
+	}
+
+	err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf)
+	if err != nil {
+		return 0, err
+	}
+
+	return n, sysErr
+}
+
+func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *BatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
+	cursorLen := int(m.keySize)
+	if cursorLen < 4 {
+		// * generic_map_lookup_batch requires that batch_out is key_size bytes.
+		//   This is used by array and LPM maps.
+		//
+		// * __htab_map_lookup_and_delete_batch requires u32. This is used by the
+		//   various hash maps.
+		//
+		// Use a minimum of 4 bytes to avoid having to distinguish between the two.
+		cursorLen = 4
+	}
+
+	inBatch := cursor.opaque
+	if inBatch == nil {
+		// This is the first lookup, allocate a buffer to hold the cursor.
+		cursor.opaque = make([]byte, cursorLen)
+		cursor.m = m
+	} else if cursor.m != m {
+		// Prevent reuse of a cursor across maps. First, it's unlikely to work.
+		// Second, the maps may require different cursorLen and cursor.opaque
+		// may therefore be too short. This could lead to the kernel clobbering
+		// user space memory.
+		return 0, errors.New("a cursor may not be reused across maps")
+	}
+
+	if err := haveBatchAPI(); err != nil {
+		return 0, err
+	}
+
+	keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize))
 
 	attr := sys.MapLookupBatchAttr{
 		MapFd:    m.fd.Uint(),
-		Keys:     keyPtr,
+		Keys:     keyBuf.Pointer(),
 		Values:   valuePtr,
 		Count:    uint32(count),
-		OutBatch: nextBuf.Pointer(),
+		InBatch:  sys.NewSlicePointer(inBatch),
+		OutBatch: sys.NewSlicePointer(cursor.opaque),
 	}
 
 	if opts != nil {
@@ -980,30 +1097,13 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut
 		attr.Flags = opts.Flags
 	}
 
-	var err error
-	if startKey != nil {
-		attr.InBatch, err = marshalMapSyscallInput(startKey, int(m.keySize))
-		if err != nil {
-			return 0, err
-		}
-	}
-
 	_, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
 	sysErr = wrapMapError(sysErr)
 	if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
 		return 0, sysErr
 	}
 
-	err = nextBuf.Unmarshal(nextKeyOut)
-	if err != nil {
-		return 0, err
-	}
-	err = sysenc.Unmarshal(keysOut, keyBuf)
-	if err != nil {
-		return 0, err
-	}
-	err = sysenc.Unmarshal(valuesOut, valueBuf)
-	if err != nil {
+	if err := keyBuf.Unmarshal(keysOut); err != nil {
 		return 0, err
 	}
 
@@ -1016,29 +1116,24 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut
 // to a slice or buffer will not work.
 func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) {
 	if m.typ.hasPerCPUValue() {
-		return 0, ErrNotSupported
+		return m.batchUpdatePerCPU(keys, values, opts)
 	}
-	keysValue := reflect.ValueOf(keys)
-	if keysValue.Kind() != reflect.Slice {
-		return 0, fmt.Errorf("keys must be a slice")
-	}
-	valuesValue := reflect.ValueOf(values)
-	if valuesValue.Kind() != reflect.Slice {
-		return 0, fmt.Errorf("values must be a slice")
-	}
-	var (
-		count    = keysValue.Len()
-		valuePtr sys.Pointer
-		err      error
-	)
-	if count != valuesValue.Len() {
-		return 0, fmt.Errorf("keys and values must be the same length")
+
+	count, err := batchCount(keys, values)
+	if err != nil {
+		return 0, err
 	}
-	keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
+
+	valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize))
 	if err != nil {
 		return 0, err
 	}
-	valuePtr, err = marshalMapSyscallInput(values, count*int(m.valueSize))
+
+	return m.batchUpdate(count, keys, valuePtr, opts)
+}
+
+func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
+	keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
 	if err != nil {
 		return 0, err
 	}
@@ -1065,17 +1160,28 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er
 	return int(attr.Count), nil
 }
 
+func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) {
+	count, err := sliceLen(keys)
+	if err != nil {
+		return 0, fmt.Errorf("keys: %w", err)
+	}
+
+	valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize))
+	if err != nil {
+		return 0, err
+	}
+
+	return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts)
+}
+
 // BatchDelete batch deletes entries in the map by keys.
 // "keys" must be of type slice, a pointer to a slice or buffer will not work.
 func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
-	if m.typ.hasPerCPUValue() {
-		return 0, ErrNotSupported
-	}
-	keysValue := reflect.ValueOf(keys)
-	if keysValue.Kind() != reflect.Slice {
-		return 0, fmt.Errorf("keys must be a slice")
+	count, err := sliceLen(keys)
+	if err != nil {
+		return 0, fmt.Errorf("keys: %w", err)
 	}
-	count := keysValue.Len()
+
 	keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
 	if err != nil {
 		return 0, fmt.Errorf("cannot marshal keys: %v", err)
@@ -1102,6 +1208,24 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
 	return int(attr.Count), nil
 }
 
+func batchCount(keys, values any) (int, error) {
+	keysLen, err := sliceLen(keys)
+	if err != nil {
+		return 0, fmt.Errorf("keys: %w", err)
+	}
+
+	valuesLen, err := sliceLen(values)
+	if err != nil {
+		return 0, fmt.Errorf("values: %w", err)
+	}
+
+	if keysLen != valuesLen {
+		return 0, fmt.Errorf("keys and values must have the same length")
+	}
+
+	return keysLen, nil
+}
+
 // Iterate traverses a map.
 //
 // It's safe to create multiple iterators at the same time.
@@ -1365,8 +1489,10 @@ func marshalMap(m *Map, length int) ([]byte, error) {
 //
 // See Map.Iterate.
 type MapIterator struct {
-	target            *Map
-	curKey            []byte
+	target *Map
+	// Temporary storage to avoid allocations in Next(). This is any instead
+	// of []byte to avoid allocations.
+	cursor            any
 	count, maxEntries uint32
 	done              bool
 	err               error
@@ -1394,34 +1520,30 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
 		return false
 	}
 
-	// For array-like maps NextKeyBytes returns nil only on after maxEntries
+	// For array-like maps NextKey returns nil only after maxEntries
 	// iterations.
 	for mi.count <= mi.maxEntries {
-		var nextKey []byte
-		if mi.curKey == nil {
-			// Pass nil interface to NextKeyBytes to make sure the Map's first key
+		if mi.cursor == nil {
+			// Pass nil interface to NextKey to make sure the Map's first key
 			// is returned. If we pass an uninitialized []byte instead, it'll see a
 			// non-nil interface and try to marshal it.
-			nextKey, mi.err = mi.target.NextKeyBytes(nil)
-
-			mi.curKey = make([]byte, mi.target.keySize)
+			mi.cursor = make([]byte, mi.target.keySize)
+			mi.err = mi.target.NextKey(nil, mi.cursor)
 		} else {
-			nextKey, mi.err = mi.target.NextKeyBytes(mi.curKey)
-		}
-		if mi.err != nil {
-			mi.err = fmt.Errorf("get next key: %w", mi.err)
-			return false
+			mi.err = mi.target.NextKey(mi.cursor, mi.cursor)
 		}
 
-		if nextKey == nil {
+		if errors.Is(mi.err, ErrKeyNotExist) {
 			mi.done = true
+			mi.err = nil
+			return false
+		} else if mi.err != nil {
+			mi.err = fmt.Errorf("get next key: %w", mi.err)
 			return false
 		}
 
-		mi.curKey = nextKey
-
 		mi.count++
-		mi.err = mi.target.Lookup(nextKey, valueOut)
+		mi.err = mi.target.Lookup(mi.cursor, valueOut)
 		if errors.Is(mi.err, ErrKeyNotExist) {
 			// Even though the key should be valid, we couldn't look up
 			// its value. If we're iterating a hash map this is probably
@@ -1438,10 +1560,11 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
 			return false
 		}
 
+		buf := mi.cursor.([]byte)
 		if ptr, ok := keyOut.(unsafe.Pointer); ok {
-			copy(unsafe.Slice((*byte)(ptr), len(nextKey)), nextKey)
+			copy(unsafe.Slice((*byte)(ptr), len(buf)), buf)
 		} else {
-			mi.err = sysenc.Unmarshal(keyOut, nextKey)
+			mi.err = sysenc.Unmarshal(keyOut, buf)
 		}
 
 		return mi.err == nil
@@ -1481,3 +1604,12 @@ func NewMapFromID(id MapID) (*Map, error) {
 
 	return newMapFromFD(fd)
 }
+
+// sliceLen returns the length if the value is a slice or an error otherwise.
+func sliceLen(slice any) (int, error) {
+	sliceValue := reflect.ValueOf(slice)
+	if sliceValue.Kind() != reflect.Slice {
+		return 0, fmt.Errorf("%T is not a slice", slice)
+	}
+	return sliceValue.Len(), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
index e89a12f0fb182acee5196a49666ebcdbc39c65ef..1efa5d425dfd2b817ece0e35a30d486e4f3bb6c0 100644
--- a/vendor/github.com/cilium/ebpf/marshalers.go
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -10,6 +10,8 @@ import (
 	"github.com/cilium/ebpf/internal"
 	"github.com/cilium/ebpf/internal/sys"
 	"github.com/cilium/ebpf/internal/sysenc"
+
+	"golang.org/x/exp/slices"
 )
 
 // marshalMapSyscallInput converts an arbitrary value into a pointer suitable
@@ -43,79 +45,125 @@ func makeMapSyscallOutput(dst any, length int) sysenc.Buffer {
 	return sysenc.SyscallOutput(dst, length)
 }
 
-// marshalPerCPUValue encodes a slice containing one value per
+// appendPerCPUSlice encodes a slice containing one value per
 // possible CPU into a buffer of bytes.
 //
 // Values are initialized to zero if the slice has less elements than CPUs.
-func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) {
+func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) {
 	sliceType := reflect.TypeOf(slice)
 	if sliceType.Kind() != reflect.Slice {
-		return sys.Pointer{}, errors.New("per-CPU value requires slice")
-	}
-
-	possibleCPUs, err := internal.PossibleCPUs()
-	if err != nil {
-		return sys.Pointer{}, err
+		return nil, errors.New("per-CPU value requires slice")
 	}
 
 	sliceValue := reflect.ValueOf(slice)
 	sliceLen := sliceValue.Len()
 	if sliceLen > possibleCPUs {
-		return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
+		return nil, fmt.Errorf("per-CPU value greater than number of CPUs")
 	}
 
-	alignedElemLength := internal.Align(elemLength, 8)
-	buf := make([]byte, alignedElemLength*possibleCPUs)
-
+	// Grow increases the slice's capacity, _if_necessary_
+	buf = slices.Grow(buf, alignedElemLength*possibleCPUs)
 	for i := 0; i < sliceLen; i++ {
 		elem := sliceValue.Index(i).Interface()
 		elemBytes, err := sysenc.Marshal(elem, elemLength)
 		if err != nil {
-			return sys.Pointer{}, err
+			return nil, err
 		}
 
-		offset := i * alignedElemLength
-		elemBytes.CopyTo(buf[offset : offset+elemLength])
+		buf = elemBytes.AppendTo(buf)
+		buf = append(buf, make([]byte, alignedElemLength-elemLength)...)
+	}
+
+	// Ensure buf is zero-padded full size.
+	buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...)
+
+	return buf, nil
+}
+
+// marshalPerCPUValue encodes a slice containing one value per
+// possible CPU into a buffer of bytes.
+//
+// Values are initialized to zero if the slice has less elements than CPUs.
+func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) {
+	possibleCPUs, err := PossibleCPU()
+	if err != nil {
+		return sys.Pointer{}, err
+	}
+
+	alignedElemLength := internal.Align(elemLength, 8)
+	buf := make([]byte, 0, alignedElemLength*possibleCPUs)
+	buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength)
+	if err != nil {
+		return sys.Pointer{}, err
 	}
 
 	return sys.NewSlicePointer(buf), nil
 }
 
+// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing
+// one value per possible CPU into a buffer of bytes.
+func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) {
+	sliceType := reflect.TypeOf(slice)
+	if sliceType.Kind() != reflect.Slice {
+		return nil, fmt.Errorf("batch value requires a slice")
+	}
+	sliceValue := reflect.ValueOf(slice)
+
+	possibleCPUs, err := PossibleCPU()
+	if err != nil {
+		return nil, err
+	}
+	if sliceValue.Len() != batchLen*possibleCPUs {
+		return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+			batchLen*possibleCPUs, sliceValue.Len())
+	}
+	alignedElemLength := internal.Align(elemLength, 8)
+	buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs)
+	for i := 0; i < batchLen; i++ {
+		batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
+		buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength)
+		if err != nil {
+			return nil, fmt.Errorf("batch %d: %w", i, err)
+		}
+	}
+	return buf, nil
+}
+
 // unmarshalPerCPUValue decodes a buffer into a slice containing one value per
 // possible CPU.
 //
-// slicePtr must be a pointer to a slice.
-func unmarshalPerCPUValue(slicePtr any, elemLength int, buf []byte) error {
-	slicePtrType := reflect.TypeOf(slicePtr)
-	if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
-		return fmt.Errorf("per-cpu value requires pointer to slice")
+// slice must be a literal slice and not a pointer.
+func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error {
+	sliceType := reflect.TypeOf(slice)
+	if sliceType.Kind() != reflect.Slice {
+		return fmt.Errorf("per-CPU value requires a slice")
 	}
 
-	possibleCPUs, err := internal.PossibleCPUs()
+	possibleCPUs, err := PossibleCPU()
 	if err != nil {
 		return err
 	}
 
-	sliceType := slicePtrType.Elem()
-	slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)
+	sliceValue := reflect.ValueOf(slice)
+	if sliceValue.Len() != possibleCPUs {
+		return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+			possibleCPUs, sliceValue.Len())
+	}
 
 	sliceElemType := sliceType.Elem()
 	sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
-	if sliceElemIsPointer {
-		sliceElemType = sliceElemType.Elem()
-	}
-
 	stride := internal.Align(elemLength, 8)
 	for i := 0; i < possibleCPUs; i++ {
 		var elem any
+		v := sliceValue.Index(i)
 		if sliceElemIsPointer {
-			newElem := reflect.New(sliceElemType)
-			slice.Index(i).Set(newElem)
-			elem = newElem.Interface()
+			if !v.Elem().CanAddr() {
+				return fmt.Errorf("per-CPU slice elements cannot be nil")
+			}
+			elem = v.Elem().Addr().Interface()
 		} else {
-			elem = slice.Index(i).Addr().Interface()
+			elem = v.Addr().Interface()
 		}
-
 		err := sysenc.Unmarshal(elem, buf[:elemLength])
 		if err != nil {
 			return fmt.Errorf("cpu %d: %w", i, err)
@@ -123,7 +171,41 @@ func unmarshalPerCPUValue(slicePtr any, elemLength int, buf []byte) error {
 
 		buf = buf[stride:]
 	}
+	return nil
+}
+
+// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice
+// containing one value per possible CPU.
+//
+// slice must have length batchLen * PossibleCPUs().
+func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error {
+	sliceType := reflect.TypeOf(slice)
+	if sliceType.Kind() != reflect.Slice {
+		return fmt.Errorf("batch requires a slice")
+	}
+
+	sliceValue := reflect.ValueOf(slice)
+	possibleCPUs, err := PossibleCPU()
+	if err != nil {
+		return err
+	}
+	if sliceValue.Len() != batchLen*possibleCPUs {
+		return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+			batchLen*possibleCPUs, sliceValue.Len())
+	}
 
-	reflect.ValueOf(slicePtr).Elem().Set(slice)
+	fullValueSize := possibleCPUs * internal.Align(elemLength, 8)
+	if len(buf) != batchLen*fullValueSize {
+		return fmt.Errorf("input buffer has incorrect length, expected %d, got %d",
+			batchLen*fullValueSize, len(buf))
+	}
+
+	for i := 0; i < batchLen; i++ {
+		elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
+		if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil {
+			return fmt.Errorf("batch %d: %w", i, err)
+		}
+		buf = buf[fullValueSize:]
+	}
 	return nil
 }
diff --git a/vendor/github.com/cilium/ebpf/perf/reader.go b/vendor/github.com/cilium/ebpf/perf/reader.go
index 1aec79d50b27d9d269f16668085640da59129713..3c820708c720cf2ee719916baec3d14ab0357dba 100644
--- a/vendor/github.com/cilium/ebpf/perf/reader.go
+++ b/vendor/github.com/cilium/ebpf/perf/reader.go
@@ -231,11 +231,12 @@ func NewReaderWithOptions(array *ebpf.Map, perCPUBuffer int, opts ReaderOptions)
 			pauseFds = append(pauseFds, -1)
 			continue
 		}
-		bufferSize = ring.size()
 
 		if err != nil {
 			return nil, fmt.Errorf("failed to create perf ring for CPU %d: %v", i, err)
 		}
+
+		bufferSize = ring.size()
 		rings = append(rings, ring)
 		pauseFds = append(pauseFds, ring.fd)
 
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
index 6d46a0422b96b4ab4fe5e3715336f61572973ac4..e904abc54c447f0cd667e4b9c7a1bfeb75fd20cd 100644
--- a/vendor/github.com/cilium/ebpf/prog.go
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -242,14 +242,26 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
 	insns := make(asm.Instructions, len(spec.Instructions))
 	copy(insns, spec.Instructions)
 
-	handle, fib, lib, err := btf.MarshalExtInfos(insns)
-	if err != nil && !errors.Is(err, btf.ErrNotSupported) {
-		return nil, fmt.Errorf("load ext_infos: %w", err)
+	var b btf.Builder
+	if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder, &b); err != nil {
+		return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
 	}
-	if handle != nil {
-		defer handle.Close()
 
-		attr.ProgBtfFd = uint32(handle.FD())
+	errExtInfos := haveProgramExtInfos()
+	if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) {
+		// There is at least one CO-RE relocation which relies on a stable local
+		// type ID.
+		// Return ErrNotSupported instead of E2BIG if there is no BTF support.
+		return nil, errExtInfos
+	}
+
+	if errExtInfos == nil {
+		// Only add func and line info if the kernel supports it. This allows
+		// BPF compiled with modern toolchains to work on old kernels.
+		fib, lib, err := btf.MarshalExtInfos(insns, &b)
+		if err != nil {
+			return nil, fmt.Errorf("marshal ext_infos: %w", err)
+		}
 
 		attr.FuncInfoRecSize = btf.FuncInfoSize
 		attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
@@ -260,8 +272,14 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
 		attr.LineInfo = sys.NewSlicePointer(lib)
 	}
 
-	if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder); err != nil {
-		return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
+	if !b.Empty() {
+		handle, err := btf.NewHandle(&b)
+		if err != nil {
+			return nil, fmt.Errorf("load BTF: %w", err)
+		}
+		defer handle.Close()
+
+		attr.ProgBtfFd = uint32(handle.FD())
 	}
 
 	kconfig, err := resolveKconfigReferences(insns)
@@ -703,10 +721,6 @@ func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) {
 		Cpu:         opts.CPU,
 	}
 
-	if attr.Repeat == 0 {
-		attr.Repeat = 1
-	}
-
 retry:
 	for {
 		err := sys.ProgRun(&attr)
@@ -715,7 +729,7 @@ retry:
 		}
 
 		if errors.Is(err, unix.EINTR) {
-			if attr.Repeat == 1 {
+			if attr.Repeat <= 1 {
 				// Older kernels check whether enough repetitions have been
 				// executed only after checking for pending signals.
 				//
diff --git a/vendor/github.com/cilium/ebpf/ringbuf/reader.go b/vendor/github.com/cilium/ebpf/ringbuf/reader.go
index ea11c823fb1c84860897d1219149e8abd0a23a56..c6adaf2f969d1f6a09bbc7cfce2e8229c9ecd4bd 100644
--- a/vendor/github.com/cilium/ebpf/ringbuf/reader.go
+++ b/vendor/github.com/cilium/ebpf/ringbuf/reader.go
@@ -117,6 +117,7 @@ type Reader struct {
 	header      []byte
 	haveData    bool
 	deadline    time.Time
+	bufferSize  int
 }
 
 // NewReader creates a new BPF ringbuf reader.
@@ -151,6 +152,7 @@ func NewReader(ringbufMap *ebpf.Map) (*Reader, error) {
 		ring:        ring,
 		epollEvents: make([]unix.EpollEvent, 1),
 		header:      make([]byte, ringbufHeaderSize),
+		bufferSize:  ring.size(),
 	}, nil
 }
 
@@ -238,5 +240,5 @@ func (r *Reader) ReadInto(rec *Record) error {
 
 // BufferSize returns the size in bytes of the ring buffer
 func (r *Reader) BufferSize() int {
-	return r.ring.size()
+	return r.bufferSize
 }
diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh
index 629a069dd14027c263ffa1da4f1914dc0605c82b..9f435613903d0417d3e59094c66b82b2684b8e30 100644
--- a/vendor/github.com/cilium/ebpf/run-tests.sh
+++ b/vendor/github.com/cilium/ebpf/run-tests.sh
@@ -14,6 +14,8 @@ set -euo pipefail
 script="$(realpath "$0")"
 readonly script
 
+source "$(dirname "$script")/testdata/sh/lib.sh"
+
 quote_env() {
   for var in "$@"; do
     if [ -v "$var" ]; then
@@ -96,8 +98,8 @@ elif [[ "${1:-}" = "--exec-test" ]]; then
   mount -t bpf bpf /sys/fs/bpf
   mount -t tracefs tracefs /sys/kernel/debug/tracing
 
-  if [[ -d "/run/input/bpf" ]]; then
-    export KERNEL_SELFTESTS="/run/input/bpf"
+  if [[ -d "/run/input/usr/src/linux/tools/testing/selftests/bpf" ]]; then
+    export KERNEL_SELFTESTS="/run/input/usr/src/linux/tools/testing/selftests/bpf"
   fi
 
   if [[ -d "/run/input/lib/modules" ]]; then
@@ -117,48 +119,21 @@ if [[ -z "${1:-}" ]]; then
   exit 1
 fi
 
-readonly input="$(mktemp -d)"
-readonly tmp_dir="${TMPDIR:-/tmp}"
-
-fetch() {
-    echo Fetching "${1}"
-    pushd "${tmp_dir}" > /dev/null
-    curl --no-progress-meter -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${BRANCH:-master}/${1}"
-    local ret=$?
-    popd > /dev/null
-    return $ret
-}
-
-machine="$(uname -m)"
-readonly machine
+input="$(mktemp -d)"
+readonly input
 
 if [[ -f "${1}" ]]; then
+  # First argument is a local file.
   readonly kernel="${1}"
-  cp "${1}" "${input}/bzImage"
+  cp "${1}" "${input}/boot/vmlinuz"
 else
-# LINUX_VERSION_CODE test compares this to discovered value.
-  export KERNEL_VERSION="${1}"
-
-  if [ "${machine}" = "x86_64" ]; then
-    readonly kernel="linux-${1}-amd64.tgz"
-    readonly selftests="linux-${1}-amd64-selftests-bpf.tgz"
-  elif [ "${machine}" = "aarch64" ]; then
-    readonly kernel="linux-${1}-arm64.tgz"
-    readonly selftests=""
-  else
-    echo "Arch ${machine} is not supported"
-    exit 1
-  fi
+  readonly kernel="${1}"
 
-  fetch "${kernel}"
-  tar xf "${tmp_dir}/${kernel}" -C "${input}"
+  # LINUX_VERSION_CODE test compares this to discovered value.
+  export KERNEL_VERSION="${1}"
 
-  if [ -n "${selftests}" ] && fetch "${selftests}"; then
-    echo "Decompressing selftests"
-    mkdir "${input}/bpf"
-    tar --strip-components=5 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
-  else
-    echo "No selftests found, disabling"
+  if ! extract_oci_image "ghcr.io/cilium/ci-kernels:${kernel}-selftests" "${input}"; then
+    extract_oci_image "ghcr.io/cilium/ci-kernels:${kernel}" "${input}"
   fi
 fi
 shift
diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go
index cdf1fcf2ef58c48c75ccde95f26009448bc0bdd2..4aef7faebc8af4b78f2c85219ca7ab61febcad25 100644
--- a/vendor/github.com/cilium/ebpf/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/syscalls.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"math"
 	"os"
 	"runtime"
 
@@ -302,3 +303,35 @@ var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func
 
 	return evt.Close()
 })
+
+var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", func() error {
+	insns := asm.Instructions{
+		asm.Mov.Imm(asm.R0, 0),
+		asm.Return(),
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+	if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
+		return err
+	}
+	bytecode := buf.Bytes()
+
+	_, err := sys.ProgLoad(&sys.ProgLoadAttr{
+		ProgType:    sys.ProgType(SocketFilter),
+		License:     sys.NewStringPointer("MIT"),
+		Insns:       sys.NewSlicePointer(bytecode),
+		InsnCnt:     uint32(len(bytecode) / asm.InstructionSize),
+		FuncInfoCnt: 1,
+		ProgBtfFd:   math.MaxUint32,
+	})
+
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+
+	if errors.Is(err, unix.E2BIG) {
+		return ErrNotSupported
+	}
+
+	return err
+})
diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go
index e9215519a2fd10a79c9caa82403a34325d76bf38..5146721c899bf08804f2bf0040e28676394b9197 100644
--- a/vendor/github.com/cilium/ebpf/types.go
+++ b/vendor/github.com/cilium/ebpf/types.go
@@ -125,38 +125,39 @@ type ProgramType uint32
 
 // eBPF program types
 const (
-	UnspecifiedProgram ProgramType = iota
-	SocketFilter
-	Kprobe
-	SchedCLS
-	SchedACT
-	TracePoint
-	XDP
-	PerfEvent
-	CGroupSKB
-	CGroupSock
-	LWTIn
-	LWTOut
-	LWTXmit
-	SockOps
-	SkSKB
-	CGroupDevice
-	SkMsg
-	RawTracepoint
-	CGroupSockAddr
-	LWTSeg6Local
-	LircMode2
-	SkReuseport
-	FlowDissector
-	CGroupSysctl
-	RawTracepointWritable
-	CGroupSockopt
-	Tracing
-	StructOps
-	Extension
-	LSM
-	SkLookup
-	Syscall
+	UnspecifiedProgram    = ProgramType(sys.BPF_PROG_TYPE_UNSPEC)
+	SocketFilter          = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER)
+	Kprobe                = ProgramType(sys.BPF_PROG_TYPE_KPROBE)
+	SchedCLS              = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS)
+	SchedACT              = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT)
+	TracePoint            = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT)
+	XDP                   = ProgramType(sys.BPF_PROG_TYPE_XDP)
+	PerfEvent             = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT)
+	CGroupSKB             = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB)
+	CGroupSock            = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK)
+	LWTIn                 = ProgramType(sys.BPF_PROG_TYPE_LWT_IN)
+	LWTOut                = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT)
+	LWTXmit               = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT)
+	SockOps               = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS)
+	SkSKB                 = ProgramType(sys.BPF_PROG_TYPE_SK_SKB)
+	CGroupDevice          = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE)
+	SkMsg                 = ProgramType(sys.BPF_PROG_TYPE_SK_MSG)
+	RawTracepoint         = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT)
+	CGroupSockAddr        = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR)
+	LWTSeg6Local          = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL)
+	LircMode2             = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2)
+	SkReuseport           = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT)
+	FlowDissector         = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR)
+	CGroupSysctl          = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL)
+	RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE)
+	CGroupSockopt         = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT)
+	Tracing               = ProgramType(sys.BPF_PROG_TYPE_TRACING)
+	StructOps             = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS)
+	Extension             = ProgramType(sys.BPF_PROG_TYPE_EXT)
+	LSM                   = ProgramType(sys.BPF_PROG_TYPE_LSM)
+	SkLookup              = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP)
+	Syscall               = ProgramType(sys.BPF_PROG_TYPE_SYSCALL)
+	Netfilter             = ProgramType(sys.BPF_PROG_TYPE_NETFILTER)
 )
 
 // AttachType of the eBPF program, needed to differentiate allowed context accesses in
@@ -170,49 +171,55 @@ type AttachType uint32
 const AttachNone AttachType = 0
 
 const (
-	AttachCGroupInetIngress AttachType = iota
-	AttachCGroupInetEgress
-	AttachCGroupInetSockCreate
-	AttachCGroupSockOps
-	AttachSkSKBStreamParser
-	AttachSkSKBStreamVerdict
-	AttachCGroupDevice
-	AttachSkMsgVerdict
-	AttachCGroupInet4Bind
-	AttachCGroupInet6Bind
-	AttachCGroupInet4Connect
-	AttachCGroupInet6Connect
-	AttachCGroupInet4PostBind
-	AttachCGroupInet6PostBind
-	AttachCGroupUDP4Sendmsg
-	AttachCGroupUDP6Sendmsg
-	AttachLircMode2
-	AttachFlowDissector
-	AttachCGroupSysctl
-	AttachCGroupUDP4Recvmsg
-	AttachCGroupUDP6Recvmsg
-	AttachCGroupGetsockopt
-	AttachCGroupSetsockopt
-	AttachTraceRawTp
-	AttachTraceFEntry
-	AttachTraceFExit
-	AttachModifyReturn
-	AttachLSMMac
-	AttachTraceIter
-	AttachCgroupInet4GetPeername
-	AttachCgroupInet6GetPeername
-	AttachCgroupInet4GetSockname
-	AttachCgroupInet6GetSockname
-	AttachXDPDevMap
-	AttachCgroupInetSockRelease
-	AttachXDPCPUMap
-	AttachSkLookup
-	AttachXDP
-	AttachSkSKBVerdict
-	AttachSkReuseportSelect
-	AttachSkReuseportSelectOrMigrate
-	AttachPerfEvent
-	AttachTraceKprobeMulti
+	AttachCGroupInetIngress          = AttachType(sys.BPF_CGROUP_INET_INGRESS)
+	AttachCGroupInetEgress           = AttachType(sys.BPF_CGROUP_INET_EGRESS)
+	AttachCGroupInetSockCreate       = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE)
+	AttachCGroupSockOps              = AttachType(sys.BPF_CGROUP_SOCK_OPS)
+	AttachSkSKBStreamParser          = AttachType(sys.BPF_SK_SKB_STREAM_PARSER)
+	AttachSkSKBStreamVerdict         = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT)
+	AttachCGroupDevice               = AttachType(sys.BPF_CGROUP_DEVICE)
+	AttachSkMsgVerdict               = AttachType(sys.BPF_SK_MSG_VERDICT)
+	AttachCGroupInet4Bind            = AttachType(sys.BPF_CGROUP_INET4_BIND)
+	AttachCGroupInet6Bind            = AttachType(sys.BPF_CGROUP_INET6_BIND)
+	AttachCGroupInet4Connect         = AttachType(sys.BPF_CGROUP_INET4_CONNECT)
+	AttachCGroupInet6Connect         = AttachType(sys.BPF_CGROUP_INET6_CONNECT)
+	AttachCGroupInet4PostBind        = AttachType(sys.BPF_CGROUP_INET4_POST_BIND)
+	AttachCGroupInet6PostBind        = AttachType(sys.BPF_CGROUP_INET6_POST_BIND)
+	AttachCGroupUDP4Sendmsg          = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG)
+	AttachCGroupUDP6Sendmsg          = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG)
+	AttachLircMode2                  = AttachType(sys.BPF_LIRC_MODE2)
+	AttachFlowDissector              = AttachType(sys.BPF_FLOW_DISSECTOR)
+	AttachCGroupSysctl               = AttachType(sys.BPF_CGROUP_SYSCTL)
+	AttachCGroupUDP4Recvmsg          = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG)
+	AttachCGroupUDP6Recvmsg          = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG)
+	AttachCGroupGetsockopt           = AttachType(sys.BPF_CGROUP_GETSOCKOPT)
+	AttachCGroupSetsockopt           = AttachType(sys.BPF_CGROUP_SETSOCKOPT)
+	AttachTraceRawTp                 = AttachType(sys.BPF_TRACE_RAW_TP)
+	AttachTraceFEntry                = AttachType(sys.BPF_TRACE_FENTRY)
+	AttachTraceFExit                 = AttachType(sys.BPF_TRACE_FEXIT)
+	AttachModifyReturn               = AttachType(sys.BPF_MODIFY_RETURN)
+	AttachLSMMac                     = AttachType(sys.BPF_LSM_MAC)
+	AttachTraceIter                  = AttachType(sys.BPF_TRACE_ITER)
+	AttachCgroupInet4GetPeername     = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME)
+	AttachCgroupInet6GetPeername     = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME)
+	AttachCgroupInet4GetSockname     = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME)
+	AttachCgroupInet6GetSockname     = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME)
+	AttachXDPDevMap                  = AttachType(sys.BPF_XDP_DEVMAP)
+	AttachCgroupInetSockRelease      = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE)
+	AttachXDPCPUMap                  = AttachType(sys.BPF_XDP_CPUMAP)
+	AttachSkLookup                   = AttachType(sys.BPF_SK_LOOKUP)
+	AttachXDP                        = AttachType(sys.BPF_XDP)
+	AttachSkSKBVerdict               = AttachType(sys.BPF_SK_SKB_VERDICT)
+	AttachSkReuseportSelect          = AttachType(sys.BPF_SK_REUSEPORT_SELECT)
+	AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)
+	AttachPerfEvent                  = AttachType(sys.BPF_PERF_EVENT)
+	AttachTraceKprobeMulti           = AttachType(sys.BPF_TRACE_KPROBE_MULTI)
+	AttachLSMCgroup                  = AttachType(sys.BPF_LSM_CGROUP)
+	AttachStructOps                  = AttachType(sys.BPF_STRUCT_OPS)
+	AttachNetfilter                  = AttachType(sys.BPF_NETFILTER)
+	AttachTCXIngress                 = AttachType(sys.BPF_TCX_INGRESS)
+	AttachTCXEgress                  = AttachType(sys.BPF_TCX_EGRESS)
+	AttachTraceUprobeMulti           = AttachType(sys.BPF_TRACE_UPROBE_MULTI)
 )
 
 // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go
index e20c37aa4e43258c1d9baa03637b7df70556e814..ee60b5be5b6402f740a4c04549a579cf393102cb 100644
--- a/vendor/github.com/cilium/ebpf/types_string.go
+++ b/vendor/github.com/cilium/ebpf/types_string.go
@@ -86,11 +86,12 @@ func _() {
 	_ = x[LSM-29]
 	_ = x[SkLookup-30]
 	_ = x[Syscall-31]
+	_ = x[Netfilter-32]
 }
 
-const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscall"
+const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter"
 
-var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301}
+var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310}
 
 func (i ProgramType) String() string {
 	if i >= ProgramType(len(_ProgramType_index)-1) {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0bb3eb3ca8de0fc7bbae8510476308d388111616..6496b700f84926d9820f95d90c749d7a847cf5c1 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -13,7 +13,7 @@ github.com/caarlos0/env/v6
 # github.com/cespare/xxhash/v2 v2.2.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/cilium/ebpf v0.12.3
+# github.com/cilium/ebpf v0.12.4-0.20240124115601-f95957d1669c
 ## explicit; go 1.20
 github.com/cilium/ebpf
 github.com/cilium/ebpf/asm