From 47f7dff040fc603a1007442db59e443da2f50823 Mon Sep 17 00:00:00 2001
From: "Mohamed S. Mahmoud" <mmahmoud@redhat.com>
Date: Tue, 11 Jul 2023 07:06:58 -0400
Subject: [PATCH] update cilium to v0.11.0 (#147)

Signed-off-by: msherif1234 <mmahmoud@redhat.com>
---
 Makefile                                      |   2 +-
 go.mod                                        |   5 +-
 go.sum                                        |  12 +-
 vendor/github.com/cilium/ebpf/.clang-format   |   2 +
 vendor/github.com/cilium/ebpf/.golangci.yaml  |   4 +-
 vendor/github.com/cilium/ebpf/ARCHITECTURE.md |  62 ++-
 vendor/github.com/cilium/ebpf/CONTRIBUTING.md |  26 +-
 vendor/github.com/cilium/ebpf/MAINTAINERS.md  |   7 +-
 vendor/github.com/cilium/ebpf/Makefile        |  17 +-
 vendor/github.com/cilium/ebpf/README.md       |   3 +-
 vendor/github.com/cilium/ebpf/asm/func.go     |  10 +-
 .../github.com/cilium/ebpf/asm/func_string.go |  14 +-
 .../github.com/cilium/ebpf/asm/instruction.go |  15 +-
 vendor/github.com/cilium/ebpf/asm/register.go |   9 +-
 vendor/github.com/cilium/ebpf/btf/btf.go      | 357 +++++++------
 .../github.com/cilium/ebpf/btf/btf_types.go   |  12 +-
 vendor/github.com/cilium/ebpf/btf/core.go     | 106 ++--
 vendor/github.com/cilium/ebpf/btf/ext_info.go |  67 +--
 vendor/github.com/cilium/ebpf/btf/handle.go   |  51 +-
 vendor/github.com/cilium/ebpf/btf/marshal.go  | 367 ++++++++-----
 vendor/github.com/cilium/ebpf/btf/strings.go  |  67 ++-
 .../github.com/cilium/ebpf/btf/traversal.go   |   7 +-
 vendor/github.com/cilium/ebpf/btf/types.go    | 169 +++---
 .../github.com/cilium/ebpf/btf/workarounds.go |  26 +
 vendor/github.com/cilium/ebpf/collection.go   | 105 +++-
 vendor/github.com/cilium/ebpf/elf_reader.go   | 148 +++++-
 vendor/github.com/cilium/ebpf/info.go         |  54 +-
 .../github.com/cilium/ebpf/internal/align.go  |   6 +-
 .../github.com/cilium/ebpf/internal/buffer.go |  31 ++
 vendor/github.com/cilium/ebpf/internal/cpu.go |  17 +-
 .../github.com/cilium/ebpf/internal/deque.go  |  47 +-
 .../cilium/ebpf/internal/endian_be.go         |   1 -
 .../cilium/ebpf/internal/endian_le.go         |   3 +-
 .../cilium/ebpf/internal/epoll/poller.go      |   2 +-
 vendor/github.com/cilium/ebpf/internal/io.go  |  66 +++
 .../cilium/ebpf/internal/kconfig/kconfig.go   | 267 ++++++++++
 .../cilium/ebpf/internal/memoize.go           |  26 +
 .../github.com/cilium/ebpf/internal/output.go |  13 +
 .../cilium/ebpf/internal/pinning.go           |  20 +-
 .../cilium/ebpf/internal/platform.go          |  43 ++
 .../github.com/cilium/ebpf/internal/statfs.go |  23 +
 .../github.com/cilium/ebpf/internal/sys/fd.go |  53 +-
 .../cilium/ebpf/internal/sys/fd_trace.go      |  93 ++++
 .../cilium/ebpf/internal/sys/ptr.go           |   2 +-
 .../cilium/ebpf/internal/sys/ptr_32_be.go     |   1 -
 .../cilium/ebpf/internal/sys/ptr_32_le.go     |   1 -
 .../cilium/ebpf/internal/sys/ptr_64.go        |   1 -
 .../cilium/ebpf/internal/sys/signals.go       |  25 +-
 .../cilium/ebpf/internal/sys/syscall.go       |   3 +
 .../cilium/ebpf/internal/sys/types.go         |  54 +-
 .../cilium/ebpf/internal/tracefs/kprobe.go    | 359 +++++++++++++
 .../ebpf/internal/tracefs/probetype_string.go |  24 +
 .../cilium/ebpf/internal/tracefs/uprobe.go    |  16 +
 .../cilium/ebpf/internal/unix/types_linux.go  |  12 +
 .../cilium/ebpf/internal/unix/types_other.go  |  26 +-
 .../github.com/cilium/ebpf/internal/vdso.go   |   4 +-
 .../cilium/ebpf/internal/version.go           |  22 +-
 vendor/github.com/cilium/ebpf/link/cgroup.go  |  55 +-
 vendor/github.com/cilium/ebpf/link/kprobe.go  | 371 +++-----------
 .../cilium/ebpf/link/kprobe_multi.go          |   2 +-
 vendor/github.com/cilium/ebpf/link/link.go    |  27 +-
 .../github.com/cilium/ebpf/link/perf_event.go | 228 ++-------
 .../github.com/cilium/ebpf/link/platform.go   |  25 -
 vendor/github.com/cilium/ebpf/link/query.go   |   6 +-
 .../cilium/ebpf/link/socket_filter.go         |   4 +-
 .../github.com/cilium/ebpf/link/syscalls.go   |   2 +-
 .../github.com/cilium/ebpf/link/tracepoint.go |  19 +-
 vendor/github.com/cilium/ebpf/link/tracing.go |  79 ++-
 vendor/github.com/cilium/ebpf/link/uprobe.go  | 131 ++---
 vendor/github.com/cilium/ebpf/linker.go       | 193 ++++++-
 vendor/github.com/cilium/ebpf/map.go          | 135 +++--
 vendor/github.com/cilium/ebpf/marshalers.go   |   6 +-
 vendor/github.com/cilium/ebpf/prog.go         | 113 ++--
 vendor/github.com/cilium/ebpf/run-tests.sh    |  47 +-
 vendor/github.com/cilium/ebpf/syscalls.go     |  47 +-
 vendor/golang.org/x/exp/LICENSE               |  27 +
 vendor/golang.org/x/exp/PATENTS               |  22 +
 .../x/exp/constraints/constraints.go          |  50 ++
 vendor/golang.org/x/exp/maps/maps.go          |  94 ++++
 vendor/golang.org/x/exp/slices/slices.go      | 258 ++++++++++
 vendor/golang.org/x/exp/slices/sort.go        | 126 +++++
 vendor/golang.org/x/exp/slices/zsortfunc.go   | 479 +++++++++++++++++
 .../golang.org/x/exp/slices/zsortordered.go   | 481 ++++++++++++++++++
 vendor/golang.org/x/sys/unix/ioctl.go         |  17 +-
 vendor/golang.org/x/sys/unix/ioctl_zos.go     |   8 +-
 vendor/golang.org/x/sys/unix/ptrace_darwin.go |   6 +
 vendor/golang.org/x/sys/unix/ptrace_ios.go    |   6 +
 vendor/golang.org/x/sys/unix/syscall_aix.go   |   5 +-
 vendor/golang.org/x/sys/unix/syscall_bsd.go   |   3 +-
 .../golang.org/x/sys/unix/syscall_darwin.go   |  12 +-
 .../x/sys/unix/syscall_darwin_amd64.go        |   1 +
 .../x/sys/unix/syscall_darwin_arm64.go        |   1 +
 .../x/sys/unix/syscall_dragonfly.go           |   1 +
 .../golang.org/x/sys/unix/syscall_freebsd.go  |  43 +-
 .../x/sys/unix/syscall_freebsd_386.go         |  17 +-
 .../x/sys/unix/syscall_freebsd_amd64.go       |  17 +-
 .../x/sys/unix/syscall_freebsd_arm.go         |  15 +-
 .../x/sys/unix/syscall_freebsd_arm64.go       |  15 +-
 .../x/sys/unix/syscall_freebsd_riscv64.go     |  15 +-
 vendor/golang.org/x/sys/unix/syscall_hurd.go  |   8 +
 vendor/golang.org/x/sys/unix/syscall_linux.go |  36 +-
 .../golang.org/x/sys/unix/syscall_netbsd.go   |   5 +-
 .../golang.org/x/sys/unix/syscall_openbsd.go  |   1 +
 .../golang.org/x/sys/unix/syscall_solaris.go  |  21 +-
 .../x/sys/unix/syscall_zos_s390x.go           |   4 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go |  10 +-
 .../x/sys/unix/zptrace_armnn_linux.go         |   8 +-
 .../x/sys/unix/zptrace_linux_arm64.go         |   4 +-
 .../x/sys/unix/zptrace_mipsnn_linux.go        |   8 +-
 .../x/sys/unix/zptrace_mipsnnle_linux.go      |   8 +-
 .../x/sys/unix/zptrace_x86_linux.go           |   8 +-
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go |  10 +
 .../x/sys/unix/zsyscall_aix_ppc64.go          |  10 +
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go       |   7 +
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go    |   8 +
 .../x/sys/unix/zsyscall_darwin_amd64.go       |  16 +
 .../x/sys/unix/zsyscall_darwin_arm64.go       |  16 +
 .../x/sys/unix/zsyscall_dragonfly_amd64.go    |  10 +
 .../x/sys/unix/zsyscall_freebsd_386.go        |  20 +
 .../x/sys/unix/zsyscall_freebsd_amd64.go      |  20 +
 .../x/sys/unix/zsyscall_freebsd_arm.go        |  20 +
 .../x/sys/unix/zsyscall_freebsd_arm64.go      |  20 +
 .../x/sys/unix/zsyscall_freebsd_riscv64.go    |  20 +
 .../golang.org/x/sys/unix/zsyscall_linux.go   |  10 +
 .../x/sys/unix/zsyscall_netbsd_386.go         |  10 +
 .../x/sys/unix/zsyscall_netbsd_amd64.go       |  10 +
 .../x/sys/unix/zsyscall_netbsd_arm.go         |  10 +
 .../x/sys/unix/zsyscall_netbsd_arm64.go       |  10 +
 .../x/sys/unix/zsyscall_openbsd_386.go        |   8 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go      |   8 +
 .../x/sys/unix/zsyscall_openbsd_arm.go        |   8 +
 .../x/sys/unix/zsyscall_openbsd_arm64.go      |   8 +
 .../x/sys/unix/zsyscall_openbsd_mips64.go     |   8 +
 .../x/sys/unix/zsyscall_openbsd_ppc64.go      |   8 +
 .../x/sys/unix/zsyscall_openbsd_riscv64.go    |   8 +
 .../x/sys/unix/zsyscall_solaris_amd64.go      |  11 +
 .../x/sys/unix/zsyscall_zos_s390x.go          |  10 +
 .../x/sys/unix/ztypes_freebsd_386.go          |   2 +-
 .../x/sys/unix/ztypes_freebsd_amd64.go        |   2 +-
 .../x/sys/unix/ztypes_freebsd_arm.go          |   2 +-
 .../x/sys/unix/ztypes_freebsd_arm64.go        |   2 +-
 .../x/sys/unix/ztypes_freebsd_riscv64.go      |   2 +-
 vendor/golang.org/x/sys/unix/ztypes_linux.go  | 140 +++--
 .../golang.org/x/sys/unix/ztypes_linux_386.go |   2 +-
 .../x/sys/unix/ztypes_linux_amd64.go          |   2 +-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go |   2 +-
 .../x/sys/unix/ztypes_linux_arm64.go          |   2 +-
 .../x/sys/unix/ztypes_linux_loong64.go        |   2 +-
 .../x/sys/unix/ztypes_linux_mips.go           |   2 +-
 .../x/sys/unix/ztypes_linux_mips64.go         |   2 +-
 .../x/sys/unix/ztypes_linux_mips64le.go       |   2 +-
 .../x/sys/unix/ztypes_linux_mipsle.go         |   2 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go |   2 +-
 .../x/sys/unix/ztypes_linux_ppc64.go          |   2 +-
 .../x/sys/unix/ztypes_linux_ppc64le.go        |   2 +-
 .../x/sys/unix/ztypes_linux_riscv64.go        |   2 +-
 .../x/sys/unix/ztypes_linux_s390x.go          |   2 +-
 .../x/sys/unix/ztypes_linux_sparc64.go        |   2 +-
 .../x/sys/windows/syscall_windows.go          |   6 +-
 .../golang.org/x/sys/windows/types_windows.go |  85 ++++
 .../x/sys/windows/zsyscall_windows.go         |  27 +
 vendor/modules.txt                            |  13 +-
 162 files changed, 5266 insertions(+), 1664 deletions(-)
 create mode 100644 vendor/github.com/cilium/ebpf/btf/workarounds.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/buffer.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/memoize.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/platform.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/statfs.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
 create mode 100644 vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
 delete mode 100644 vendor/github.com/cilium/ebpf/link/platform.go
 create mode 100644 vendor/golang.org/x/exp/LICENSE
 create mode 100644 vendor/golang.org/x/exp/PATENTS
 create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go
 create mode 100644 vendor/golang.org/x/exp/maps/maps.go
 create mode 100644 vendor/golang.org/x/exp/slices/slices.go
 create mode 100644 vendor/golang.org/x/exp/slices/sort.go
 create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go
 create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go

diff --git a/Makefile b/Makefile
index a6415b86..dea367bf 100644
--- a/Makefile
+++ b/Makefile
@@ -35,7 +35,7 @@ OCI_BIN_PATH := $(shell which docker 2>/dev/null || which podman)
 OCI_BIN ?= $(shell basename ${OCI_BIN_PATH})
 
 LOCAL_GENERATOR_IMAGE ?= ebpf-generator:latest
-CILIUM_EBPF_VERSION := v0.10.0
+CILIUM_EBPF_VERSION := v0.11.0
 GOLANGCI_LINT_VERSION = v1.50.1
 CLANG ?= clang
 CFLAGS := -O2 -g -Wall -Werror $(CFLAGS)
diff --git a/go.mod b/go.mod
index f7a78a36..520b1b66 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.18
 
 require (
 	github.com/caarlos0/env/v6 v6.9.1
-	github.com/cilium/ebpf v0.10.0
+	github.com/cilium/ebpf v0.11.0
 	github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424
 	github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f
 	github.com/netobserv/gopipes v0.3.0
@@ -15,7 +15,7 @@ require (
 	github.com/vishvananda/netlink v1.1.0
 	github.com/vladimirvivien/gexe v0.1.1
 	github.com/vmware/go-ipfix v0.5.12
-	golang.org/x/sys v0.5.0
+	golang.org/x/sys v0.6.0
 	google.golang.org/grpc v1.53.0
 	google.golang.org/protobuf v1.28.1
 	k8s.io/api v0.24.0
@@ -60,6 +60,7 @@ require (
 	github.com/xdg/scram v1.0.5 // indirect
 	github.com/xdg/stringprep v1.0.3 // indirect
 	golang.org/x/crypto v0.5.0 // indirect
+	golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect
 	golang.org/x/net v0.7.0 // indirect
 	golang.org/x/oauth2 v0.4.0 // indirect
 	golang.org/x/term v0.5.0 // indirect
diff --git a/go.sum b/go.sum
index 61c3a3cc..df3da8a6 100644
--- a/go.sum
+++ b/go.sum
@@ -87,8 +87,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.10.0 h1:nk5HPMeoBXtOzbkZBWym+ZWq1GIiHUsBFXxwewXAHLQ=
-github.com/cilium/ebpf v0.10.0/go.mod h1:DPiVdY/kT534dgc9ERmvP8mWA+9gvwgKfRvk4nNWnoE=
+github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
+github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -130,7 +130,7 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
-github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
@@ -530,6 +530,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -703,8 +705,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format
index 4eb94b1b..3f74dc02 100644
--- a/vendor/github.com/cilium/ebpf/.clang-format
+++ b/vendor/github.com/cilium/ebpf/.clang-format
@@ -14,4 +14,6 @@ KeepEmptyLinesAtTheStartOfBlocks: false
 TabWidth:        4
 UseTab:          ForContinuationAndIndentation
 ColumnLimit:     1000
+# Go compiler comments need to stay unindented.
+CommentPragmas: '^go:.*'
 ...
diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml
index dc62dd6d..06743dfc 100644
--- a/vendor/github.com/cilium/ebpf/.golangci.yaml
+++ b/vendor/github.com/cilium/ebpf/.golangci.yaml
@@ -9,7 +9,6 @@ issues:
 linters:
   disable-all: true
   enable:
-    - deadcode
     - errcheck
     - goimports
     - gosimple
@@ -17,10 +16,9 @@ linters:
     - ineffassign
     - misspell
     - staticcheck
-    - structcheck
     - typecheck
     - unused
-    - varcheck
+    - gofmt
 
     # Could be enabled later:
     # - gocyclo
diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
index 8cd7e248..26f555eb 100644
--- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
+++ b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
@@ -1,7 +1,21 @@
 Architecture of the library
 ===
 
-    ELF -> Specifications -> Objects -> Links
+```mermaid
+graph RL
+    Program --> ProgramSpec --> ELF
+    btf.Spec --> ELF
+    Map --> MapSpec --> ELF
+    Links --> Map & Program
+    ProgramSpec -.-> btf.Spec
+    MapSpec -.-> btf.Spec
+    subgraph Collection
+        Program & Map
+    end
+    subgraph CollectionSpec
+        ProgramSpec & MapSpec & btf.Spec
+    end
+```
 
 ELF
 ---
@@ -11,7 +25,7 @@ an ELF file which contains program byte code (aka BPF), but also metadata for
 maps used by the program. The metadata follows the conventions set by libbpf
 shipped with the kernel. Certain ELF sections have special meaning
 and contain structures defined by libbpf. Newer versions of clang emit
-additional metadata in BPF Type Format (aka BTF).
+additional metadata in [BPF Type Format](#BTF).
 
 The library aims to be compatible with libbpf so that moving from a C toolchain
 to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
@@ -20,41 +34,33 @@ if possible.
 
 The output of the ELF reader is a `CollectionSpec` which encodes
 all of the information contained in the ELF in a form that is easy to work with
-in Go.
-
-### BTF
-
-The BPF Type Format describes more than just the types used by a BPF program. It
-includes debug aids like which source line corresponds to which instructions and
-what global variables are used.
-
-[BTF parsing](internal/btf/) lives in a separate internal package since exposing
-it would mean an additional maintenance burden, and because the API still
-has sharp corners. The most important concept is the `btf.Type` interface, which
-also describes things that aren't really types like `.rodata` or `.bss` sections.
-`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if
-one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as
-we write more code that deals with it.
+in Go. The returned `CollectionSpec` should be deterministic: reading the same ELF
+file on different systems must produce the same output.
+As a corollary, any changes that depend on the runtime environment like the
+current kernel version must happen when creating [Objects](#Objects).
 
 Specifications
 ---
 
-`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel
+`CollectionSpec` is a very simple container for `ProgramSpec`, `MapSpec` and
+`btf.Spec`. Avoid adding functionality to it if possible.
+
+`ProgramSpec` and `MapSpec` are blueprints for in-kernel
 objects and contain everything necessary to execute the relevant `bpf(2)`
-syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to
-modify clang-compiled BPF code, for example to rewrite constants. At the same
-time the [asm](asm/) package provides an assembler that can be used to generate
-`ProgramSpec` on the fly.
+syscalls. They refer to `btf.Spec` for type information such as `Map` key and
+value types.
 
-Creating a spec should never require any privileges or be restricted in any way,
-for example by only allowing programs in native endianness. This ensures that
-the library stays flexible.
+The [asm](asm/) package provides an assembler that can be used to generate
+`ProgramSpec` on the fly.
 
 Objects
 ---
 
-`Program` and `Map` are the result of loading specs into the kernel. Sometimes
-loading a spec will fail because the kernel is too old, or a feature is not
+`Program` and `Map` are the result of loading specifications into the kernel.
+Features that depend on knowledge of the current system (e.g kernel version)
+are implemented at this point.
+
+Sometimes loading a spec will fail because the kernel is too old, or a feature is not
 enabled. There are multiple ways the library deals with that:
 
 * Fallback: older kernels don't allow naming programs and maps. The library
@@ -73,7 +79,7 @@ useful when our higher-level API doesn't support a particular use case.
 Links
 ---
 
-BPF can be attached to many different points in the kernel and newer BPF hooks
+Programs can be attached to many different points in the kernel and newer BPF hooks
 tend to use bpf_link to do so. Older hooks unfortunately use a combination of
 syscalls, netlink messages, etc. Adding support for a new link type should not
 pull in large dependencies like netlink, so XDP programs or tracepoints are
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
index 0d29eae8..bf57da93 100644
--- a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
+++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
@@ -5,15 +5,23 @@ the form of pull requests and issues reporting bugs or suggesting new features
 are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
 a better understanding for the high-level goals.
 
-New features must be accompanied by tests. Before starting work on any large
-feature, please [join](https://ebpf.io/slack) the
-[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to
-discuss the design first.
+## Adding a new feature
 
-When submitting pull requests, consider writing details about what problem you
-are solving and why the proposed approach solves that problem in commit messages
-and/or pull request description to help future library users and maintainers to
-reason about the proposed changes.
+1. [Join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel to discuss your requirements and how the feature can be implemented. The most important part is figuring out how much new exported API is necessary. **The less new API is required the easier it will be to land the feature.**
+2. (*optional*) Create a draft PR if you want to discuss the implementation or have hit a problem. It's fine if this doesn't compile or contains debug statements.
+3. Create a PR that is ready to merge. This must pass CI and have tests.
+
+### API stability
+
+The library doesn't guarantee the stability of its API at the moment.
+
+1. If possible avoid breakage by introducing new API and deprecating the old one
+   at the same time. If an API was deprecated in v0.x it can be removed in v0.x+1.
+2. Breaking API in a way that causes compilation failures is acceptable but must
+   have good reasons.
+3. Changing the semantics of the API without causing compilation failures is
+   heavily discouraged.
 
 ## Running the tests
 
@@ -35,6 +43,6 @@ Examples:
 ./run-tests.sh 5.4
 
 # Run a subset of tests:
-./run-tests.sh 5.4 go test ./link
+./run-tests.sh 5.4 ./link
 ```
 
diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
index 9c18e7e7..a56a03e3 100644
--- a/vendor/github.com/cilium/ebpf/MAINTAINERS.md
+++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
@@ -1,8 +1,3 @@
 # Maintainers
 
- * [Lorenz Bauer] 
- * [Timo Beckers] (Isovalent)
-
-
-[Lorenz Bauer]: https://github.com/lmb
-[Timo Beckers]: https://github.com/ti-mo
+Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md)
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
index c6dbebca..abcd6c1a 100644
--- a/vendor/github.com/cilium/ebpf/Makefile
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -28,6 +28,7 @@ TARGETS := \
 	testdata/loader-clang-7 \
 	testdata/loader-clang-9 \
 	testdata/loader-$(CLANG) \
+	testdata/manyprogs \
 	testdata/btf_map_init \
 	testdata/invalid_map \
 	testdata/raw_tracepoint \
@@ -39,9 +40,15 @@ TARGETS := \
 	testdata/map_spin_lock \
 	testdata/subprog_reloc \
 	testdata/fwd_decl \
+	testdata/kconfig \
+	testdata/kconfig_config \
+	testdata/kfunc \
+	testdata/invalid-kfunc \
+	testdata/kfunc-kmod \
 	btf/testdata/relocs \
 	btf/testdata/relocs_read \
-	btf/testdata/relocs_read_tgt
+	btf/testdata/relocs_read_tgt \
+	cmd/bpf2go/testdata/minimal
 
 .PHONY: all clean container-all container-shell generate
 
@@ -49,12 +56,12 @@ TARGETS := \
 
 # Build all ELF binaries using a containerized LLVM toolchain.
 container-all:
-	${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \
+	+${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \
 		-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
 		--env CFLAGS="-fdebug-prefix-map=/ebpf=." \
 		--env HOME="/tmp" \
 		"${IMAGE}:${VERSION}" \
-		$(MAKE) all
+		make all
 
 # (debug) Drop the user into a shell inside the container as root.
 container-shell:
@@ -96,11 +103,11 @@ testdata/loader-%-eb.elf: testdata/loader.c
 	$(STRIP) -g $@
 
 .PHONY: generate-btf
-generate-btf: KERNEL_VERSION?=5.18
+generate-btf: KERNEL_VERSION?=5.19
 generate-btf:
 	$(eval TMP := $(shell mktemp -d))
 	curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION).bz" -o "$(TMP)/bzImage"
-	./testdata/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux"
+	/lib/modules/$(uname -r)/build/scripts/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux"
 	$(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz"
 	curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-selftests-bpf.tgz" -o "$(TMP)/selftests.tgz"
 	tar -xf "$(TMP)/selftests.tgz" --to-stdout tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.ko | \
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
index 85e2d150..eff08d8d 100644
--- a/vendor/github.com/cilium/ebpf/README.md
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -16,7 +16,7 @@ ecosystem.
 A small collection of Go and eBPF programs that serve as examples for building
 your own tools can be found under [examples/](examples/).
 
-Contributions are highly encouraged, as they highlight certain use cases of
+[Contributions](CONTRIBUTING.md) are highly encouraged, as they highlight certain use cases of
 eBPF and the library, and help shape the future of the project.
 
 ## Getting Help
@@ -53,6 +53,7 @@ This library includes the following packages:
   of `bpftool feature probe` for discovering BPF-related kernel features using native Go.
 * [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift
   the `RLIMIT_MEMLOCK` constraint on kernels before 5.11.
+* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format.
 
 ## Requirements
 
diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go
index 4c0ac9a2..18f6a75d 100644
--- a/vendor/github.com/cilium/ebpf/asm/func.go
+++ b/vendor/github.com/cilium/ebpf/asm/func.go
@@ -14,7 +14,7 @@ func (_ BuiltinFunc) Max() BuiltinFunc {
 // You can regenerate this list using the following gawk script:
 //
 //	/FN\(.+\),/ {
-//	  match($1, /\((.+)\)/, r)
+//	  match($1, /\(([a-z_0-9]+),/, r)
 //	  split(r[1], p, "_")
 //	  printf "Fn"
 //	  for (i in p) {
@@ -229,6 +229,14 @@ const (
 	FnDynptrRead
 	FnDynptrWrite
 	FnDynptrData
+	FnTcpRawGenSyncookieIpv4
+	FnTcpRawGenSyncookieIpv6
+	FnTcpRawCheckSyncookieIpv4
+	FnTcpRawCheckSyncookieIpv6
+	FnKtimeGetTaiNs
+	FnUserRingbufDrain
+	FnCgrpStorageGet
+	FnCgrpStorageDelete
 
 	maxBuiltinFunc
 )
diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go
index b7431b7f..47150bc4 100644
--- a/vendor/github.com/cilium/ebpf/asm/func_string.go
+++ b/vendor/github.com/cilium/ebpf/asm/func_string.go
@@ -212,12 +212,20 @@ func _() {
 	_ = x[FnDynptrRead-201]
 	_ = x[FnDynptrWrite-202]
 	_ = x[FnDynptrData-203]
-	_ = x[maxBuiltinFunc-204]
+	_ = x[FnTcpRawGenSyncookieIpv4-204]
+	_ = x[FnTcpRawGenSyncookieIpv6-205]
+	_ = x[FnTcpRawCheckSyncookieIpv4-206]
+	_ = x[FnTcpRawCheckSyncookieIpv6-207]
+	_ = x[FnKtimeGetTaiNs-208]
+	_ = x[FnUserRingbufDrain-209]
+	_ = x[FnCgrpStorageGet-210]
+	_ = x[FnCgrpStorageDelete-211]
+	_ = x[maxBuiltinFunc-212]
 }
 
-const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubm
itFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDatamaxBuiltinFunc"
+const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubm
itFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc"
 
-var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3011}
+var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179}
 
 func (i BuiltinFunc) String() string {
 	if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go
index 19c5b646..ef01eaa3 100644
--- a/vendor/github.com/cilium/ebpf/asm/instruction.go
+++ b/vendor/github.com/cilium/ebpf/asm/instruction.go
@@ -226,6 +226,13 @@ func (ins *Instruction) IsFunctionCall() bool {
 	return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
 }
 
+// IsKfuncCall returns true if the instruction calls a kfunc.
+//
+// This is not the same thing as a BPF helper call.
+func (ins *Instruction) IsKfuncCall() bool {
+	return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall
+}
+
 // IsLoadOfFunctionPointer returns true if the instruction loads a function pointer.
 func (ins *Instruction) IsLoadOfFunctionPointer() bool {
 	return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc
@@ -318,10 +325,14 @@ func (ins Instruction) Format(f fmt.State, c rune) {
 	case cls.IsJump():
 		switch jop := op.JumpOp(); jop {
 		case Call:
-			if ins.Src == PseudoCall {
+			switch ins.Src {
+			case PseudoCall:
 				// bpf-to-bpf call
 				fmt.Fprint(f, ins.Constant)
-			} else {
+			case PseudoKfuncCall:
+				// kfunc call
+				fmt.Fprintf(f, "Kfunc(%d)", ins.Constant)
+			default:
 				fmt.Fprint(f, BuiltinFunc(ins.Constant))
 			}
 
diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go
index dd5d44f1..457a3b8a 100644
--- a/vendor/github.com/cilium/ebpf/asm/register.go
+++ b/vendor/github.com/cilium/ebpf/asm/register.go
@@ -35,10 +35,11 @@ const (
 
 // Pseudo registers used by 64bit loads and jumps
 const (
-	PseudoMapFD    = R1 // BPF_PSEUDO_MAP_FD
-	PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
-	PseudoCall     = R1 // BPF_PSEUDO_CALL
-	PseudoFunc     = R4 // BPF_PSEUDO_FUNC
+	PseudoMapFD     = R1 // BPF_PSEUDO_MAP_FD
+	PseudoMapValue  = R2 // BPF_PSEUDO_MAP_VALUE
+	PseudoCall      = R1 // BPF_PSEUDO_CALL
+	PseudoFunc      = R4 // BPF_PSEUDO_FUNC
+	PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL
 )
 
 func (r Register) String() string {
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
index a27dcd16..86eb7d68 100644
--- a/vendor/github.com/cilium/ebpf/btf/btf.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -2,7 +2,6 @@ package btf
 
 import (
 	"bufio"
-	"bytes"
 	"debug/elf"
 	"encoding/binary"
 	"errors"
@@ -31,11 +30,9 @@ var (
 // ID represents the unique ID of a BTF object.
 type ID = sys.BTFID
 
-// Spec represents decoded BTF.
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
 type Spec struct {
-	// Data from .BTF.
-	strings *stringTable
-
 	// All types contained by the spec, not including types from the base in
 	// case the spec was parsed from split BTF.
 	types []Type
@@ -43,10 +40,17 @@ type Spec struct {
 	// Type IDs indexed by type.
 	typeIDs map[Type]TypeID
 
+	// The ID of the first type in types.
+	firstTypeID TypeID
+
 	// Types indexed by essential name.
 	// Includes all struct flavors and types with the same name.
 	namedTypes map[essentialName][]Type
 
+	// String table from ELF, may be nil.
+	strings *stringTable
+
+	// Byte order of the ELF we decoded the spec from, may be nil.
 	byteOrder binary.ByteOrder
 }
 
@@ -76,6 +80,18 @@ func (h *btfHeader) stringStart() int64 {
 	return int64(h.HdrLen + h.StringOff)
 }
 
+// newSpec creates a Spec containing only Void.
+func newSpec() *Spec {
+	return &Spec{
+		[]Type{(*Void)(nil)},
+		map[Type]TypeID{(*Void)(nil): 0},
+		0,
+		make(map[essentialName][]Type),
+		nil,
+		nil,
+	}
+}
+
 // LoadSpec opens file and calls LoadSpecFromReader on it.
 func LoadSpec(file string) (*Spec, error) {
 	fh, err := os.Open(file)
@@ -95,7 +111,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
 	file, err := internal.NewSafeELFFile(rd)
 	if err != nil {
 		if bo := guessRawBTFByteOrder(rd); bo != nil {
-			return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
+			return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
 		}
 
 		return nil, err
@@ -119,7 +135,7 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
 		return nil, nil, err
 	}
 
-	extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
+	extInfos, err := loadExtInfosFromELF(file, spec)
 	if err != nil && !errors.Is(err, ErrNotFound) {
 		return nil, nil, err
 	}
@@ -199,7 +215,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
 		return nil, fmt.Errorf("compressed BTF is not supported")
 	}
 
-	spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil, nil)
+	spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -212,31 +228,53 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
 	return spec, nil
 }
 
-func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
-	baseTypes types, baseStrings *stringTable) (*Spec, error) {
+func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) {
+	var (
+		baseStrings *stringTable
+		firstTypeID TypeID
+		err         error
+	)
+
+	if base != nil {
+		if base.firstTypeID != 0 {
+			return nil, fmt.Errorf("can't use split BTF as base")
+		}
+
+		if base.strings == nil {
+			return nil, fmt.Errorf("parse split BTF: base must be loaded from an ELF")
+		}
+
+		baseStrings = base.strings
+
+		firstTypeID, err = base.nextTypeID()
+		if err != nil {
+			return nil, err
+		}
+	}
 
 	rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
 	if err != nil {
 		return nil, err
 	}
 
-	types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
+	types, err := inflateRawTypes(rawTypes, rawStrings, base)
 	if err != nil {
 		return nil, err
 	}
 
-	typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))
+	typeIDs, typesByName := indexTypes(types, firstTypeID)
 
 	return &Spec{
-		namedTypes: typesByName,
-		typeIDs:    typeIDs,
-		types:      types,
-		strings:    rawStrings,
-		byteOrder:  bo,
+		namedTypes:  typesByName,
+		typeIDs:     typeIDs,
+		types:       types,
+		firstTypeID: firstTypeID,
+		strings:     rawStrings,
+		byteOrder:   bo,
 	}, nil
 }
 
-func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
 	namedTypes := 0
 	for _, typ := range types {
 		if typ.TypeName() != "" {
@@ -254,7 +292,7 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
 		if name := newEssentialName(typ.TypeName()); name != "" {
 			typesByName[name] = append(typesByName[name], typ)
 		}
-		typeIDs[typ] = TypeID(i) + typeIDOffset
+		typeIDs[typ] = firstTypeID + TypeID(i)
 	}
 
 	return typeIDs, typesByName
@@ -266,7 +304,10 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
 // for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
 func LoadKernelSpec() (*Spec, error) {
 	spec, _, err := kernelSpec()
-	return spec, err
+	if err != nil {
+		return nil, err
+	}
+	return spec.Copy(), nil
 }
 
 var kernelBTF struct {
@@ -297,7 +338,7 @@ func kernelSpec() (*Spec, bool, error) {
 	}
 
 	if spec != nil {
-		return spec.Copy(), fallback, nil
+		return spec, fallback, nil
 	}
 
 	spec, fallback, err := loadKernelSpec()
@@ -306,7 +347,7 @@ func kernelSpec() (*Spec, bool, error) {
 	}
 
 	kernelBTF.spec, kernelBTF.fallback = spec, fallback
-	return spec.Copy(), fallback, nil
+	return spec, fallback, nil
 }
 
 func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
@@ -314,7 +355,7 @@ func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
 	if err == nil {
 		defer fh.Close()
 
-		spec, err := loadRawSpec(fh, internal.NativeEndian, nil, nil)
+		spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
 		return spec, false, err
 	}
 
@@ -433,6 +474,8 @@ type symbol struct {
 	name    string
 }
 
+// fixupDatasec attempts to patch up missing info in Datasecs and its members by
+// supplementing them with information from the ELF headers and symbol table.
 func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
 	for _, typ := range types {
 		ds, ok := typ.(*Datasec)
@@ -441,8 +484,34 @@ func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symb
 		}
 
 		name := ds.Name
-		if name == ".kconfig" || name == ".ksyms" {
-			return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
+
+		// Some Datasecs are virtual and don't have corresponding ELF sections.
+		switch name {
+		case ".ksyms":
+			// .ksyms describes forward declarations of kfunc signatures.
+			// Nothing to fix up, all sizes and offsets are 0.
+			for _, vsi := range ds.Vars {
+				_, ok := vsi.Type.(*Func)
+				if !ok {
+					// Only Funcs are supported in the .ksyms Datasec.
+					return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported)
+				}
+			}
+
+			continue
+		case ".kconfig":
+			// .kconfig has a size of 0 and has all members' offsets set to 0.
+			// Fix up all offsets and set the Datasec's size.
+			if err := fixupDatasecLayout(ds); err != nil {
+				return err
+			}
+
+			// Fix up extern to global linkage to avoid a BTF verifier error.
+			for _, vsi := range ds.Vars {
+				vsi.Type.(*Var).Linkage = GlobalVar
+			}
+
+			continue
 		}
 
 		if ds.Size != 0 {
@@ -466,18 +535,52 @@ func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symb
 	return nil
 }
 
+// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
+// alignment. Calculate and set ds.Size.
+func fixupDatasecLayout(ds *Datasec) error {
+	var off uint32
+
+	for i, vsi := range ds.Vars {
+		v, ok := vsi.Type.(*Var)
+		if !ok {
+			return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
+		}
+
+		size, err := Sizeof(v.Type)
+		if err != nil {
+			return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
+		}
+		align, err := alignof(v.Type)
+		if err != nil {
+			return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
+		}
+
+		// Align the current member based on the offset of the end of the previous
+		// member and the alignment of the current member.
+		off = internal.Align(off, uint32(align))
+
+		ds.Vars[i].Offset = off
+
+		off += uint32(size)
+	}
+
+	ds.Size = off
+
+	return nil
+}
+
 // Copy creates a copy of Spec.
 func (s *Spec) Copy() *Spec {
 	types := copyTypes(s.types, nil)
-
-	typeIDs, typesByName := indexTypes(types, s.firstTypeID())
+	typeIDs, typesByName := indexTypes(types, s.firstTypeID)
 
 	// NB: Other parts of spec are not copied since they are immutable.
 	return &Spec{
-		s.strings,
 		types,
 		typeIDs,
+		s.firstTypeID,
 		typesByName,
+		s.strings,
 		s.byteOrder,
 	}
 }
@@ -492,19 +595,31 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
 	return copy(sw, p), nil
 }
 
+// nextTypeID returns the next unallocated type ID or an error if there are no
+// more type IDs.
+func (s *Spec) nextTypeID() (TypeID, error) {
+	id := s.firstTypeID + TypeID(len(s.types))
+	if id < s.firstTypeID {
+		return 0, fmt.Errorf("no more type IDs")
+	}
+	return id, nil
+}
+
 // TypeByID returns the BTF Type with the given type ID.
 //
 // Returns an error wrapping ErrNotFound if a Type with the given ID
 // does not exist in the Spec.
 func (s *Spec) TypeByID(id TypeID) (Type, error) {
-	firstID := s.firstTypeID()
-	lastID := firstID + TypeID(len(s.types))
+	if id < s.firstTypeID {
+		return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
+	}
 
-	if id < firstID || id >= lastID {
-		return nil, fmt.Errorf("expected type ID between %d and %d, got %d: %w", firstID, lastID, id, ErrNotFound)
+	index := int(id - s.firstTypeID)
+	if index >= len(s.types) {
+		return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
 	}
 
-	return s.types[id-firstID], nil
+	return s.types[index], nil
 }
 
 // TypeID returns the ID for a given Type.
@@ -625,25 +740,17 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
 	return nil
 }
 
-// firstTypeID returns the first type ID or zero.
-func (s *Spec) firstTypeID() TypeID {
-	if len(s.types) > 0 {
-		return s.typeIDs[s.types[0]]
-	}
-	return 0
-}
-
 // LoadSplitSpecFromReader loads split BTF from a reader.
 //
 // Types from base are used to resolve references in the split BTF.
 // The returned Spec only contains types from the split BTF, not from the base.
 func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
-	return loadRawSpec(r, internal.NativeEndian, base.types, base.strings)
+	return loadRawSpec(r, internal.NativeEndian, base)
 }
 
 // TypesIterator iterates over types of a given spec.
 type TypesIterator struct {
-	spec  *Spec
+	types []Type
 	index int
 	// The last visited type in the spec.
 	Type Type
@@ -651,69 +758,31 @@ type TypesIterator struct {
 
 // Iterate returns the types iterator.
 func (s *Spec) Iterate() *TypesIterator {
-	return &TypesIterator{spec: s, index: 0}
+	// We share the backing array of types with the Spec. This is safe since
+	// we don't allow deletion or shuffling of types.
+	return &TypesIterator{types: s.types, index: 0}
 }
 
 // Next returns true as long as there are any remaining types.
 func (iter *TypesIterator) Next() bool {
-	if len(iter.spec.types) <= iter.index {
+	if len(iter.types) <= iter.index {
 		return false
 	}
 
-	iter.Type = iter.spec.types[iter.index]
+	iter.Type = iter.types[iter.index]
 	iter.index++
 	return true
 }
 
-func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
-	const minHeaderLength = 24
-
-	typesLen := uint32(binary.Size(types))
-	header := btfHeader{
-		Magic:     btfMagic,
-		Version:   1,
-		HdrLen:    minHeaderLength,
-		TypeOff:   0,
-		TypeLen:   typesLen,
-		StringOff: typesLen,
-		StringLen: uint32(len(strings)),
-	}
-
-	buf := new(bytes.Buffer)
-	_ = binary.Write(buf, bo, &header)
-	_ = binary.Write(buf, bo, types)
-	buf.Write(strings)
-
-	return buf.Bytes()
-}
-
 // haveBTF attempts to load a BTF blob containing an Int. It should pass on any
 // kernel that supports BPF_BTF_LOAD.
 var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error {
-	var (
-		types struct {
-			Integer btfType
-			btfInt
-		}
-		strings = []byte{0}
-	)
-	types.Integer.SetKind(kindInt) // 0-length anonymous integer
-
-	btf := marshalBTF(&types, strings, internal.NativeEndian)
-
-	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
-		Btf:     sys.NewSlicePointer(btf),
-		BtfSize: uint32(len(btf)),
-	})
+	// 0-length anonymous integer
+	err := probeBTF(&Int{})
 	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
 		return internal.ErrNotSupported
 	}
-	if err != nil {
-		return err
-	}
-
-	fd.Close()
-	return nil
+	return err
 })
 
 // haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
@@ -724,37 +793,18 @@ var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func()
 		return err
 	}
 
-	var (
-		types struct {
-			Integer btfType
-			Var     btfType
-			btfVariable
-		}
-		strings = []byte{0, 'a', 0}
-	)
-
-	types.Integer.SetKind(kindPointer)
-	types.Var.NameOff = 1
-	types.Var.SetKind(kindVar)
-	types.Var.SizeType = 1
-
-	btf := marshalBTF(&types, strings, internal.NativeEndian)
+	v := &Var{
+		Name: "a",
+		Type: &Pointer{(*Void)(nil)},
+	}
 
-	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
-		Btf:     sys.NewSlicePointer(btf),
-		BtfSize: uint32(len(btf)),
-	})
+	err := probeBTF(v)
 	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
 		// Treat both EINVAL and EPERM as not supported: creating the map may still
 		// succeed without Btf* attrs.
 		return internal.ErrNotSupported
 	}
-	if err != nil {
-		return err
-	}
-
-	fd.Close()
-	return nil
+	return err
 })
 
 // haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
@@ -765,34 +815,16 @@ var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0",
 		return err
 	}
 
-	var (
-		types struct {
-			FuncProto btfType
-			Func      btfType
-		}
-		strings = []byte{0, 'a', 0}
-	)
-
-	types.FuncProto.SetKind(kindFuncProto)
-	types.Func.SetKind(kindFunc)
-	types.Func.SizeType = 1 // aka FuncProto
-	types.Func.NameOff = 1
-
-	btf := marshalBTF(&types, strings, internal.NativeEndian)
+	fn := &Func{
+		Name: "a",
+		Type: &FuncProto{Return: (*Void)(nil)},
+	}
 
-	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
-		Btf:     sys.NewSlicePointer(btf),
-		BtfSize: uint32(len(btf)),
-	})
+	err := probeBTF(fn)
 	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
 		return internal.ErrNotSupported
 	}
-	if err != nil {
-		return err
-	}
-
-	fd.Close()
-	return nil
+	return err
 })
 
 var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error {
@@ -800,33 +832,38 @@ var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func()
 		return err
 	}
 
-	var (
-		types struct {
-			FuncProto btfType
-			Func      btfType
-		}
-		strings = []byte{0, 'a', 0}
-	)
-
-	types.FuncProto.SetKind(kindFuncProto)
-	types.Func.SetKind(kindFunc)
-	types.Func.SizeType = 1 // aka FuncProto
-	types.Func.NameOff = 1
-	types.Func.SetLinkage(GlobalFunc)
-
-	btf := marshalBTF(&types, strings, internal.NativeEndian)
+	fn := &Func{
+		Name:    "a",
+		Type:    &FuncProto{Return: (*Void)(nil)},
+		Linkage: GlobalFunc,
+	}
 
-	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
-		Btf:     sys.NewSlicePointer(btf),
-		BtfSize: uint32(len(btf)),
-	})
+	err := probeBTF(fn)
 	if errors.Is(err, unix.EINVAL) {
 		return internal.ErrNotSupported
 	}
+	return err
+})
+
+func probeBTF(typ Type) error {
+	b, err := NewBuilder([]Type{typ})
 	if err != nil {
 		return err
 	}
 
-	fd.Close()
-	return nil
-})
+	buf, err := b.Marshal(nil, nil)
+	if err != nil {
+		return err
+	}
+
+	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
+		Btf:     sys.NewSlicePointer(buf),
+		BtfSize: uint32(len(buf)),
+	})
+
+	if err == nil {
+		fd.Close()
+	}
+
+	return err
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go
index dc568a90..a253b7c9 100644
--- a/vendor/github.com/cilium/ebpf/btf/btf_types.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go
@@ -4,6 +4,7 @@ import (
 	"encoding/binary"
 	"fmt"
 	"io"
+	"unsafe"
 )
 
 //go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind
@@ -193,13 +194,22 @@ func (bt *btfType) SetSize(size uint32) {
 	bt.SizeType = size
 }
 
+func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error {
+	buf := make([]byte, unsafe.Sizeof(*bt))
+	bo.PutUint32(buf[0:], bt.NameOff)
+	bo.PutUint32(buf[4:], bt.Info)
+	bo.PutUint32(buf[8:], bt.SizeType)
+	_, err := w.Write(buf)
+	return err
+}
+
 type rawType struct {
 	btfType
 	data interface{}
 }
 
 func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
-	if err := binary.Write(w, bo, &rt.btfType); err != nil {
+	if err := rt.btfType.Marshal(w, bo); err != nil {
 		return err
 	}
 
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
index a0d2c1f9..a5c40d36 100644
--- a/vendor/github.com/cilium/ebpf/btf/core.go
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -165,6 +165,14 @@ func (k coreKind) String() string {
 // Fixups are returned in the order of relos, e.g. fixup[i] is the solution
 // for relos[i].
 func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
+	if target == nil {
+		var err error
+		target, _, err = kernelSpec()
+		if err != nil {
+			return nil, fmt.Errorf("load kernel spec: %w", err)
+		}
+	}
+
 	if bo != target.byteOrder {
 		return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
 	}
@@ -229,6 +237,7 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([
 
 var errAmbiguousRelocation = errors.New("ambiguous relocation")
 var errImpossibleRelocation = errors.New("impossible relocation")
+var errIncompatibleTypes = errors.New("incompatible types")
 
 // coreCalculateFixups finds the target type that best matches all relocations.
 //
@@ -239,12 +248,11 @@ var errImpossibleRelocation = errors.New("impossible relocation")
 func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
 	bestScore := len(relos)
 	var bestFixups []COREFixup
-	for i := range targets {
-		targetID, err := targetSpec.TypeID(targets[i])
+	for _, target := range targets {
+		targetID, err := targetSpec.TypeID(target)
 		if err != nil {
 			return nil, fmt.Errorf("target type ID: %w", err)
 		}
-		target := Copy(targets[i], UnderlyingType)
 
 		score := 0 // lower is better
 		fixups := make([]COREFixup, 0, len(relos))
@@ -298,6 +306,8 @@ func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Ty
 	return bestFixups, nil
 }
 
+var errNoSignedness = errors.New("no signedness")
+
 // coreCalculateFixup calculates the fixup for a single local type, target type
 // and relocation.
 func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo binary.ByteOrder) (COREFixup, error) {
@@ -315,7 +325,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 	}
 	zero := COREFixup{}
 
-	local := Copy(relo.typ, UnderlyingType)
+	local := relo.typ
 
 	switch relo.kind {
 	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
@@ -324,7 +334,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 		}
 
 		err := coreAreTypesCompatible(local, target)
-		if errors.Is(err, errImpossibleRelocation) {
+		if errors.Is(err, errIncompatibleTypes) {
 			return poison()
 		}
 		if err != nil {
@@ -369,21 +379,8 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
 		}
 
-	case reloFieldSigned:
-		switch local.(type) {
-		case *Enum:
-			return fixup(1, 1)
-		case *Int:
-			return fixup(
-				uint32(local.(*Int).Encoding&Signed),
-				uint32(target.(*Int).Encoding&Signed),
-			)
-		default:
-			return fixupWithoutValidation(0, 0)
-		}
-
-	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
-		if _, ok := target.(*Fwd); ok {
+	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
+		if _, ok := as[*Fwd](target); ok {
 			// We can't relocate fields using a forward declaration, so
 			// skip it. If a non-forward declaration is present in the BTF
 			// we'll find it in one of the other iterations.
@@ -448,12 +445,42 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 			}
 
 			return fixupWithoutValidation(0, uint32(64-targetSize))
+
+		case reloFieldSigned:
+			switch local := UnderlyingType(localField.Type).(type) {
+			case *Enum:
+				target, ok := as[*Enum](targetField.Type)
+				if !ok {
+					return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
+				}
+
+				return fixup(boolToUint32(local.Signed), boolToUint32(target.Signed))
+			case *Int:
+				target, ok := as[*Int](targetField.Type)
+				if !ok {
+					return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
+				}
+
+				return fixup(
+					uint32(local.Encoding&Signed),
+					uint32(target.Encoding&Signed),
+				)
+			default:
+				return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
+			}
 		}
 	}
 
 	return zero, ErrNotSupported
 }
 
+func boolToUint32(val bool) uint32 {
+	if val {
+		return 1
+	}
+	return 0
+}
+
 /* coreAccessor contains a path through a struct. It contains at least one index.
  *
  * The interpretation depends on the kind of the relocation. The following is
@@ -513,7 +540,7 @@ func (ca coreAccessor) String() string {
 }
 
 func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
-	e, ok := t.(*Enum)
+	e, ok := as[*Enum](t)
 	if !ok {
 		return nil, fmt.Errorf("not an enum: %s", t)
 	}
@@ -598,7 +625,7 @@ func (cf *coreField) sizeBits() (Bits, error) {
 	// between kernel versions. Synthesise the size to make the shifts work.
 	size, err := Sizeof(cf.Type)
 	if err != nil {
-		return 0, nil
+		return 0, err
 	}
 	return Bits(size * 8), nil
 }
@@ -628,7 +655,7 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
 
 	var localMaybeFlex, targetMaybeFlex bool
 	for i, acc := range localAcc[1:] {
-		switch localType := local.Type.(type) {
+		switch localType := UnderlyingType(local.Type).(type) {
 		case composite:
 			// For composite types acc is used to find the field in the local type,
 			// and then we try to find a field in target with the same name.
@@ -639,21 +666,21 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
 
 			localMember := localMembers[acc]
 			if localMember.Name == "" {
-				_, ok := localMember.Type.(composite)
+				localMemberType, ok := as[composite](localMember.Type)
 				if !ok {
 					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
 				}
 
 				// This is an anonymous struct or union, ignore it.
 				local = coreField{
-					Type:   localMember.Type,
+					Type:   localMemberType,
 					offset: local.offset + localMember.Offset.Bytes(),
 				}
 				localMaybeFlex = false
 				continue
 			}
 
-			targetType, ok := target.Type.(composite)
+			targetType, ok := as[composite](target.Type)
 			if !ok {
 				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
 			}
@@ -699,7 +726,7 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
 
 		case *Array:
 			// For arrays, acc is the index in the target.
-			targetType, ok := target.Type.(*Array)
+			targetType, ok := as[*Array](target.Type)
 			if !ok {
 				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
 			}
@@ -793,7 +820,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) {
 				continue
 			}
 
-			comp, ok := member.Type.(composite)
+			comp, ok := as[composite](member.Type)
 			if !ok {
 				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
 			}
@@ -812,7 +839,7 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
 		return nil, nil, err
 	}
 
-	targetEnum, ok := target.(*Enum)
+	targetEnum, ok := as[*Enum](target)
 	if !ok {
 		return nil, nil, errImpossibleRelocation
 	}
@@ -829,6 +856,13 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
 	return nil, nil, errImpossibleRelocation
 }
 
+// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules.
+//
+// Only layout compatibility is checked, ignoring names of the root type.
+func CheckTypeCompatibility(localType Type, targetType Type) error {
+	return coreAreTypesCompatible(localType, targetType)
+}
+
 /* The comment below is from bpf_core_types_are_compat in libbpf.c:
  *
  * Check local and target types for compatibility. This check is used for
@@ -850,9 +884,10 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
  * These rules are not set in stone and probably will be adjusted as we get
  * more experience with using BPF CO-RE relocations.
  *
- * Returns errImpossibleRelocation if types are not compatible.
+ * Returns errIncompatibleTypes if types are not compatible.
  */
 func coreAreTypesCompatible(localType Type, targetType Type) error {
+
 	var (
 		localTs, targetTs typeDeque
 		l, t              = &localType, &targetType
@@ -864,11 +899,11 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
 			return errors.New("types are nested too deep")
 		}
 
-		localType = *l
-		targetType = *t
+		localType = UnderlyingType(*l)
+		targetType = UnderlyingType(*t)
 
 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
-			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+			return fmt.Errorf("type mismatch: %w", errIncompatibleTypes)
 		}
 
 		switch lv := (localType).(type) {
@@ -883,7 +918,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
 		case *FuncProto:
 			tv := targetType.(*FuncProto)
 			if len(lv.Params) != len(tv.Params) {
-				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
+				return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
 			}
 
 			depth++
@@ -932,6 +967,9 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
  * Returns errImpossibleRelocation if the members are not compatible.
  */
 func coreAreMembersCompatible(localType Type, targetType Type) error {
+	localType = UnderlyingType(localType)
+	targetType = UnderlyingType(targetType)
+
 	doNamesMatch := func(a, b string) error {
 		if a == "" || b == "" {
 			// allow anonymous and named type to match
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
index 36f3b7ba..b764fb7b 100644
--- a/vendor/github.com/cilium/ebpf/btf/ext_info.go
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -8,7 +8,6 @@ import (
 	"io"
 	"math"
 	"sort"
-	"sync"
 
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/internal"
@@ -25,7 +24,7 @@ type ExtInfos struct {
 // loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
 //
 // Returns an error wrapping ErrNotFound if no ext infos are present.
-func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) {
+func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) {
 	section := file.Section(".BTF.ext")
 	if section == nil {
 		return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
@@ -35,11 +34,11 @@ func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTa
 		return nil, fmt.Errorf("compressed ext_info is not supported")
 	}
 
-	return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings)
+	return loadExtInfos(section.ReaderAt, file.ByteOrder, spec, spec.strings)
 }
 
 // loadExtInfos parses bare ext infos.
-func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) {
+func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec, strings *stringTable) (*ExtInfos, error) {
 	// Open unbuffered section reader. binary.Read() calls io.ReadFull on
 	// the header structs, resulting in one syscall per header.
 	headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
@@ -61,7 +60,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringT
 
 	funcInfos := make(map[string][]funcInfo, len(btfFuncInfos))
 	for section, bfis := range btfFuncInfos {
-		funcInfos[section], err = newFuncInfos(bfis, ts)
+		funcInfos[section], err = newFuncInfos(bfis, spec)
 		if err != nil {
 			return nil, fmt.Errorf("section %s: func infos: %w", section, err)
 		}
@@ -94,7 +93,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringT
 
 	coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos))
 	for section, brs := range btfCORERelos {
-		coreRelos[section], err = newRelocationInfos(brs, ts, strings)
+		coreRelos[section], err = newRelocationInfos(brs, spec, strings)
 		if err != nil {
 			return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
 		}
@@ -131,12 +130,6 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
 	}
 }
 
-var nativeEncoderPool = sync.Pool{
-	New: func() any {
-		return newEncoder(kernelEncoderOptions, nil)
-	},
-}
-
 // MarshalExtInfos encodes function and line info embedded in insns into kernel
 // wire format.
 //
@@ -157,15 +150,10 @@ func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []
 		}
 	}
 
-	// Avoid allocating encoder, etc. if there is no BTF at all.
 	return nil, nil, nil, nil
 
 marshal:
-	enc := nativeEncoderPool.Get().(*encoder)
-	defer nativeEncoderPool.Put(enc)
-
-	enc.Reset()
-
+	var b Builder
 	var fiBuf, liBuf bytes.Buffer
 	for {
 		if fn := FuncMetadata(iter.Ins); fn != nil {
@@ -173,7 +161,7 @@ marshal:
 				fn:     fn,
 				offset: iter.Offset,
 			}
-			if err := fi.marshal(&fiBuf, enc); err != nil {
+			if err := fi.marshal(&fiBuf, &b); err != nil {
 				return nil, nil, nil, fmt.Errorf("write func info: %w", err)
 			}
 		}
@@ -183,7 +171,7 @@ marshal:
 				line:   line,
 				offset: iter.Offset,
 			}
-			if err := li.marshal(&liBuf, enc.strings); err != nil {
+			if err := li.marshal(&liBuf, &b); err != nil {
 				return nil, nil, nil, fmt.Errorf("write line info: %w", err)
 			}
 		}
@@ -193,12 +181,7 @@ marshal:
 		}
 	}
 
-	btf, err := enc.Encode()
-	if err != nil {
-		return nil, nil, nil, err
-	}
-
-	handle, err := newHandleFromRawBTF(btf)
+	handle, err := NewHandle(&b)
 	return handle, fiBuf.Bytes(), liBuf.Bytes(), err
 }
 
@@ -354,8 +337,8 @@ type bpfFuncInfo struct {
 	TypeID  TypeID
 }
 
-func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
-	typ, err := ts.ByID(fi.TypeID)
+func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) {
+	typ, err := spec.TypeByID(fi.TypeID)
 	if err != nil {
 		return nil, err
 	}
@@ -376,10 +359,10 @@ func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
 	}, nil
 }
 
-func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
+func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) ([]funcInfo, error) {
 	fis := make([]funcInfo, 0, len(bfis))
 	for _, bfi := range bfis {
-		fi, err := newFuncInfo(bfi, ts)
+		fi, err := newFuncInfo(bfi, spec)
 		if err != nil {
 			return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
 		}
@@ -392,8 +375,8 @@ func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
 }
 
 // marshal into the BTF wire format.
-func (fi *funcInfo) marshal(w *bytes.Buffer, enc *encoder) error {
-	id, err := enc.Add(fi.fn)
+func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error {
+	id, err := b.Add(fi.fn)
 	if err != nil {
 		return err
 	}
@@ -408,7 +391,7 @@ func (fi *funcInfo) marshal(w *bytes.Buffer, enc *encoder) error {
 	return err
 }
 
-// parseLineInfos parses a func_info sub-section within .BTF.ext ito a map of
+// parseFuncInfos parses a func_info sub-section within .BTF.ext ito a map of
 // func infos indexed by section name.
 func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
 	recordSize, err := parseExtInfoRecordSize(r, bo)
@@ -558,7 +541,7 @@ func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error)
 }
 
 // marshal writes the binary representation of the LineInfo to w.
-func (li *lineInfo) marshal(w *bytes.Buffer, stb *stringTableBuilder) error {
+func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error {
 	line := li.line
 	if line.lineNumber > bpfLineMax {
 		return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
@@ -568,12 +551,12 @@ func (li *lineInfo) marshal(w *bytes.Buffer, stb *stringTableBuilder) error {
 		return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
 	}
 
-	fileNameOff, err := stb.Add(line.fileName)
+	fileNameOff, err := b.addString(line.fileName)
 	if err != nil {
 		return fmt.Errorf("file name %q: %w", line.fileName, err)
 	}
 
-	lineOff, err := stb.Add(line.line)
+	lineOff, err := b.addString(line.line)
 	if err != nil {
 		return fmt.Errorf("line %q: %w", line.line, err)
 	}
@@ -669,6 +652,10 @@ type CORERelocation struct {
 	id TypeID
 }
 
+func (cr *CORERelocation) String() string {
+	return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id)
+}
+
 func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
 	relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
 	return relo
@@ -679,8 +666,8 @@ type coreRelocationInfo struct {
 	offset asm.RawInstructionOffset
 }
 
-func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
-	typ, err := ts.ByID(relo.TypeID)
+func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) {
+	typ, err := spec.TypeByID(relo.TypeID)
 	if err != nil {
 		return nil, err
 	}
@@ -706,10 +693,10 @@ func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreR
 	}, nil
 }
 
-func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
+func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) ([]coreRelocationInfo, error) {
 	rs := make([]coreRelocationInfo, 0, len(brs))
 	for _, br := range brs {
-		relo, err := newRelocationInfo(br, ts, strings)
+		relo, err := newRelocationInfo(br, spec, strings)
 		if err != nil {
 			return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
 		}
diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go
index 9a864d17..b6b3e87f 100644
--- a/vendor/github.com/cilium/ebpf/btf/handle.go
+++ b/vendor/github.com/cilium/ebpf/btf/handle.go
@@ -22,32 +22,25 @@ type Handle struct {
 	needsKernelBase bool
 }
 
-// NewHandle loads BTF into the kernel.
+// NewHandle loads the contents of a [Builder] into the kernel.
 //
-// Returns ErrNotSupported if BTF is not supported.
-func NewHandle(spec *Spec) (*Handle, error) {
-	if spec.byteOrder != nil && spec.byteOrder != internal.NativeEndian {
-		return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
-	}
-
-	enc := newEncoder(kernelEncoderOptions, newStringTableBuilderFromTable(spec.strings))
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandle(b *Builder) (*Handle, error) {
+	small := getByteSlice()
+	defer putByteSlice(small)
 
-	for _, typ := range spec.types {
-		_, err := enc.Add(typ)
-		if err != nil {
-			return nil, fmt.Errorf("add %s: %w", typ, err)
-		}
-	}
-
-	btf, err := enc.Encode()
+	buf, err := b.Marshal(*small, KernelMarshalOptions())
 	if err != nil {
 		return nil, fmt.Errorf("marshal BTF: %w", err)
 	}
 
-	return newHandleFromRawBTF(btf)
+	return NewHandleFromRawBTF(buf)
 }
 
-func newHandleFromRawBTF(btf []byte) (*Handle, error) {
+// NewHandleFromRawBTF loads raw BTF into the kernel.
+//
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
 	if uint64(len(btf)) > math.MaxUint32 {
 		return nil, errors.New("BTF exceeds the maximum size")
 	}
@@ -104,7 +97,10 @@ func NewHandleFromID(id ID) (*Handle, error) {
 }
 
 // Spec parses the kernel BTF into Go types.
-func (h *Handle) Spec() (*Spec, error) {
+//
+// base must contain type information for vmlinux if the handle is for
+// a kernel module. It may be nil otherwise.
+func (h *Handle) Spec(base *Spec) (*Spec, error) {
 	var btfInfo sys.BtfInfo
 	btfBuffer := make([]byte, h.size)
 	btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
@@ -113,20 +109,11 @@ func (h *Handle) Spec() (*Spec, error) {
 		return nil, err
 	}
 
-	if !h.needsKernelBase {
-		return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, nil, nil)
-	}
-
-	base, fallback, err := kernelSpec()
-	if err != nil {
-		return nil, fmt.Errorf("load BTF base: %w", err)
-	}
-
-	if fallback {
-		return nil, fmt.Errorf("can't load split BTF without access to /sys")
+	if h.needsKernelBase && base == nil {
+		return nil, fmt.Errorf("missing base types")
 	}
 
-	return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base.types, base.strings)
+	return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
 }
 
 // Close destroys the handle.
@@ -200,7 +187,7 @@ func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
 	}, nil
 }
 
-// IsModule returns true if the BTF is for the kernel itself.
+// IsVmlinux returns true if the BTF is for the kernel itself.
 func (i *HandleInfo) IsVmlinux() bool {
 	return i.IsKernel && i.Name == "vmlinux"
 }
diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go
index 4ae479bd..bfe53b41 100644
--- a/vendor/github.com/cilium/ebpf/btf/marshal.go
+++ b/vendor/github.com/cilium/ebpf/btf/marshal.go
@@ -6,141 +6,176 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"sync"
 
 	"github.com/cilium/ebpf/internal"
+
+	"golang.org/x/exp/slices"
 )
 
-type encoderOptions struct {
-	ByteOrder binary.ByteOrder
+type MarshalOptions struct {
+	// Target byte order. Defaults to the system's native endianness.
+	Order binary.ByteOrder
 	// Remove function linkage information for compatibility with <5.6 kernels.
 	StripFuncLinkage bool
 }
 
-// kernelEncoderOptions will generate BTF suitable for the current kernel.
-var kernelEncoderOptions encoderOptions
-
-func init() {
-	kernelEncoderOptions = encoderOptions{
-		ByteOrder:        internal.NativeEndian,
+// KernelMarshalOptions will generate BTF suitable for the current kernel.
+func KernelMarshalOptions() *MarshalOptions {
+	return &MarshalOptions{
+		Order:            internal.NativeEndian,
 		StripFuncLinkage: haveFuncLinkage() != nil,
 	}
 }
 
 // encoder turns Types into raw BTF.
 type encoder struct {
-	opts encoderOptions
+	MarshalOptions
 
-	buf          *bytes.Buffer
-	strings      *stringTableBuilder
-	allocatedIDs map[Type]TypeID
-	nextID       TypeID
-	// Temporary storage for Add.
 	pending internal.Deque[Type]
-	// Temporary storage for deflateType.
-	raw rawType
+	buf     *bytes.Buffer
+	strings *stringTableBuilder
+	ids     map[Type]TypeID
+	lastID  TypeID
 }
 
-// newEncoder returns a new builder for the given byte order.
-//
-// See [KernelEncoderOptions] to build BTF for the current system.
-func newEncoder(opts encoderOptions, strings *stringTableBuilder) *encoder {
-	enc := &encoder{
-		opts: opts,
-		buf:  bytes.NewBuffer(make([]byte, btfHeaderLen)),
-	}
-	enc.reset(strings)
-	return enc
+var bufferPool = sync.Pool{
+	New: func() any {
+		buf := make([]byte, btfHeaderLen+128)
+		return &buf
+	},
 }
 
-// Reset internal state to be able to reuse the Encoder.
-func (e *encoder) Reset() {
-	e.reset(nil)
+func getByteSlice() *[]byte {
+	return bufferPool.Get().(*[]byte)
 }
 
-func (e *encoder) reset(strings *stringTableBuilder) {
-	if strings == nil {
-		strings = newStringTableBuilder()
-	}
+func putByteSlice(buf *[]byte) {
+	*buf = (*buf)[:0]
+	bufferPool.Put(buf)
+}
 
-	e.buf.Truncate(btfHeaderLen)
-	e.strings = strings
-	e.allocatedIDs = make(map[Type]TypeID)
-	e.nextID = 1
+// Builder turns Types into raw BTF.
+//
+// The default value may be used and represents an empty BTF blob. Void is
+// added implicitly if necessary.
+type Builder struct {
+	// Explicitly added types.
+	types []Type
+	// IDs for all added types which the user knows about.
+	stableIDs map[Type]TypeID
+	// Explicitly added strings.
+	strings *stringTableBuilder
 }
 
-// Add a Type.
+// NewBuilder creates a Builder from a list of types.
 //
-// Adding the same Type multiple times is valid and will return a stable ID.
+// It is more efficient than calling [Add] individually.
 //
-// Calling the method has undefined behaviour if it previously returned an error.
-func (e *encoder) Add(typ Type) (TypeID, error) {
-	if typ == nil {
-		return 0, errors.New("cannot Add a nil Type")
+// Returns an error if adding any of the types fails.
+func NewBuilder(types []Type) (*Builder, error) {
+	b := &Builder{
+		make([]Type, 0, len(types)),
+		make(map[Type]TypeID, len(types)),
+		nil,
 	}
 
-	hasID := func(t Type) (skip bool) {
-		_, isVoid := t.(*Void)
-		_, alreadyEncoded := e.allocatedIDs[t]
-		return isVoid || alreadyEncoded
+	for _, typ := range types {
+		_, err := b.Add(typ)
+		if err != nil {
+			return nil, fmt.Errorf("add %s: %w", typ, err)
+		}
 	}
 
-	e.pending.Reset()
+	return b, nil
+}
 
-	allocateID := func(typ Type) {
-		e.pending.Push(typ)
-		e.allocatedIDs[typ] = e.nextID
-		e.nextID++
+// Add a Type and allocate a stable ID for it.
+//
+// Adding the identical Type multiple times is valid and will return the same ID.
+//
+// See [Type] for details on identity.
+func (b *Builder) Add(typ Type) (TypeID, error) {
+	if b.stableIDs == nil {
+		b.stableIDs = make(map[Type]TypeID)
 	}
 
-	iter := postorderTraversal(typ, hasID)
-	for iter.Next() {
-		if hasID(iter.Type) {
-			// This type is part of a cycle and we've already deflated it.
-			continue
-		}
+	if _, ok := typ.(*Void); ok {
+		// Equality is weird for void, since it is a zero sized type.
+		return 0, nil
+	}
 
-		// Allocate an ID for the next type.
-		allocateID(iter.Type)
+	if ds, ok := typ.(*Datasec); ok {
+		if err := datasecResolveWorkaround(b, ds); err != nil {
+			return 0, err
+		}
+	}
 
-		for !e.pending.Empty() {
-			t := e.pending.Shift()
+	id, ok := b.stableIDs[typ]
+	if ok {
+		return id, nil
+	}
 
-			// Ensure that all direct descendants have been allocated an ID
-			// before calling deflateType.
-			walkType(t, func(child *Type) {
-				if !hasID(*child) {
-					// t refers to a type which hasn't been allocated an ID
-					// yet, which only happens for circular types.
-					allocateID(*child)
-				}
-			})
+	b.types = append(b.types, typ)
 
-			if err := e.deflateType(t); err != nil {
-				return 0, fmt.Errorf("deflate %s: %w", t, err)
-			}
-		}
+	id = TypeID(len(b.types))
+	if int(id) != len(b.types) {
+		return 0, fmt.Errorf("no more type IDs")
 	}
 
-	return e.allocatedIDs[typ], nil
+	b.stableIDs[typ] = id
+	return id, nil
 }
 
-// Encode the raw BTF blob.
+// Marshal encodes all types in the Marshaler into BTF wire format.
 //
-// The returned slice is valid until the next call to Add.
-func (e *encoder) Encode() ([]byte, error) {
-	length := e.buf.Len()
+// opts may be nil.
+func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
+	stb := b.strings
+	if stb == nil {
+		// Assume that most types are named. This makes encoding large BTF like
+		// vmlinux a lot cheaper.
+		stb = newStringTableBuilder(len(b.types))
+	} else {
+		// Avoid modifying the Builder's string table.
+		stb = b.strings.Copy()
+	}
+
+	if opts == nil {
+		opts = &MarshalOptions{Order: internal.NativeEndian}
+	}
 
-	// Truncate the string table on return to allow adding more types.
-	defer e.buf.Truncate(length)
+	// Reserve space for the BTF header.
+	buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen]
 
+	w := internal.NewBuffer(buf)
+	defer internal.PutBuffer(w)
+
+	e := encoder{
+		MarshalOptions: *opts,
+		buf:            w,
+		strings:        stb,
+		lastID:         TypeID(len(b.types)),
+		ids:            make(map[Type]TypeID, len(b.types)),
+	}
+
+	// Ensure that types are marshaled in the exact order they were Add()ed.
+	// Otherwise the ID returned from Add() won't match.
+	e.pending.Grow(len(b.types))
+	for _, typ := range b.types {
+		e.pending.Push(typ)
+		e.ids[typ] = b.stableIDs[typ]
+	}
+
+	if err := e.deflatePending(); err != nil {
+		return nil, err
+	}
+
+	length := e.buf.Len()
 	typeLen := uint32(length - btfHeaderLen)
 
-	// Reserve space for the string table.
 	stringLen := e.strings.Length()
-	e.buf.Grow(stringLen)
-
-	buf := e.buf.Bytes()[:length+stringLen]
-	e.strings.MarshalBuffer(buf[length:])
+	buf = e.strings.AppendEncoded(e.buf.Bytes())
 
 	// Fill out the header, and write it out.
 	header := &btfHeader{
@@ -154,23 +189,116 @@ func (e *encoder) Encode() ([]byte, error) {
 		StringLen: uint32(stringLen),
 	}
 
-	err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.opts.ByteOrder, header)
+	err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header)
 	if err != nil {
-		return nil, fmt.Errorf("can't write header: %v", err)
+		return nil, fmt.Errorf("write header: %v", err)
 	}
 
 	return buf, nil
 }
 
+// addString adds a string to the resulting BTF.
+//
+// Adding the same string multiple times will return the same result.
+//
+// Returns an identifier into the string table or an error if the string
+// contains invalid characters.
+func (b *Builder) addString(str string) (uint32, error) {
+	if b.strings == nil {
+		b.strings = newStringTableBuilder(0)
+	}
+
+	return b.strings.Add(str)
+}
+
+func (e *encoder) allocateID(typ Type) error {
+	id := e.lastID + 1
+	if id < e.lastID {
+		return errors.New("type ID overflow")
+	}
+
+	e.pending.Push(typ)
+	e.ids[typ] = id
+	e.lastID = id
+	return nil
+}
+
+// id returns the ID for the given type or panics with an error.
+func (e *encoder) id(typ Type) TypeID {
+	if _, ok := typ.(*Void); ok {
+		return 0
+	}
+
+	id, ok := e.ids[typ]
+	if !ok {
+		panic(fmt.Errorf("no ID for type %v", typ))
+	}
+
+	return id
+}
+
+func (e *encoder) deflatePending() error {
+	// Declare root outside of the loop to avoid repeated heap allocations.
+	var root Type
+	skip := func(t Type) (skip bool) {
+		if t == root {
+			// Force descending into the current root type even if it already
+			// has an ID. Otherwise we miss children of types that have their
+			// ID pre-allocated via Add.
+			return false
+		}
+
+		_, isVoid := t.(*Void)
+		_, alreadyEncoded := e.ids[t]
+		return isVoid || alreadyEncoded
+	}
+
+	for !e.pending.Empty() {
+		root = e.pending.Shift()
+
+		// Allocate IDs for all children of typ, including transitive dependencies.
+		iter := postorderTraversal(root, skip)
+		for iter.Next() {
+			if iter.Type == root {
+				// The iterator yields root at the end, do not allocate another ID.
+				break
+			}
+
+			if err := e.allocateID(iter.Type); err != nil {
+				return err
+			}
+		}
+
+		if err := e.deflateType(root); err != nil {
+			id := e.ids[root]
+			return fmt.Errorf("deflate %v with ID %d: %w", root, id, err)
+		}
+	}
+
+	return nil
+}
+
 func (e *encoder) deflateType(typ Type) (err error) {
-	raw := &e.raw
-	*raw = rawType{}
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			err, ok = r.(error)
+			if !ok {
+				panic(r)
+			}
+		}
+	}()
+
+	var raw rawType
 	raw.NameOff, err = e.strings.Add(typ.TypeName())
 	if err != nil {
 		return err
 	}
 
 	switch v := typ.(type) {
+	case *Void:
+		return errors.New("Void is implicit in BTF wire format")
+
 	case *Int:
 		raw.SetKind(kindInt)
 		raw.SetSize(v.Size)
@@ -184,13 +312,13 @@ func (e *encoder) deflateType(typ Type) (err error) {
 
 	case *Pointer:
 		raw.SetKind(kindPointer)
-		raw.SetType(e.allocatedIDs[v.Target])
+		raw.SetType(e.id(v.Target))
 
 	case *Array:
 		raw.SetKind(kindArray)
 		raw.data = &btfArray{
-			e.allocatedIDs[v.Type],
-			e.allocatedIDs[v.Index],
+			e.id(v.Type),
+			e.id(v.Index),
 			v.Nelems,
 		}
 
@@ -223,36 +351,36 @@ func (e *encoder) deflateType(typ Type) (err error) {
 
 	case *Typedef:
 		raw.SetKind(kindTypedef)
-		raw.SetType(e.allocatedIDs[v.Type])
+		raw.SetType(e.id(v.Type))
 
 	case *Volatile:
 		raw.SetKind(kindVolatile)
-		raw.SetType(e.allocatedIDs[v.Type])
+		raw.SetType(e.id(v.Type))
 
 	case *Const:
 		raw.SetKind(kindConst)
-		raw.SetType(e.allocatedIDs[v.Type])
+		raw.SetType(e.id(v.Type))
 
 	case *Restrict:
 		raw.SetKind(kindRestrict)
-		raw.SetType(e.allocatedIDs[v.Type])
+		raw.SetType(e.id(v.Type))
 
 	case *Func:
 		raw.SetKind(kindFunc)
-		raw.SetType(e.allocatedIDs[v.Type])
-		if !e.opts.StripFuncLinkage {
+		raw.SetType(e.id(v.Type))
+		if !e.StripFuncLinkage {
 			raw.SetLinkage(v.Linkage)
 		}
 
 	case *FuncProto:
 		raw.SetKind(kindFuncProto)
-		raw.SetType(e.allocatedIDs[v.Return])
+		raw.SetType(e.id(v.Return))
 		raw.SetVlen(len(v.Params))
 		raw.data, err = e.deflateFuncParams(v.Params)
 
 	case *Var:
 		raw.SetKind(kindVar)
-		raw.SetType(e.allocatedIDs[v.Type])
+		raw.SetType(e.id(v.Type))
 		raw.data = btfVariable{uint32(v.Linkage)}
 
 	case *Datasec:
@@ -267,10 +395,13 @@ func (e *encoder) deflateType(typ Type) (err error) {
 
 	case *declTag:
 		raw.SetKind(kindDeclTag)
+		raw.SetType(e.id(v.Type))
 		raw.data = &btfDeclTag{uint32(v.Index)}
+		raw.NameOff, err = e.strings.Add(v.Value)
 
 	case *typeTag:
 		raw.SetKind(kindTypeTag)
+		raw.SetType(e.id(v.Type))
 		raw.NameOff, err = e.strings.Add(v.Value)
 
 	default:
@@ -281,7 +412,7 @@ func (e *encoder) deflateType(typ Type) (err error) {
 		return err
 	}
 
-	return raw.Marshal(e.buf, e.opts.ByteOrder)
+	return raw.Marshal(e.buf, e.Order)
 }
 
 func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) {
@@ -302,7 +433,7 @@ func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember
 
 		bms = append(bms, btfMember{
 			nameOff,
-			e.allocatedIDs[member.Type],
+			e.id(member.Type),
 			uint32(offset),
 		})
 	}
@@ -361,7 +492,7 @@ func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) {
 
 		bps = append(bps, btfParam{
 			nameOff,
-			e.allocatedIDs[param.Type],
+			e.id(param.Type),
 		})
 	}
 	return bps, nil
@@ -371,7 +502,7 @@ func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo {
 	vsis := make([]btfVarSecinfo, 0, len(vars))
 	for _, v := range vars {
 		vsis = append(vsis, btfVarSecinfo{
-			e.allocatedIDs[v.Type],
+			e.id(v.Type),
 			v.Offset,
 			v.Size,
 		})
@@ -383,33 +514,24 @@ func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo {
 //
 // The function is intended for the use of the ebpf package and may be removed
 // at any point in time.
-func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, _ error) {
-	enc := nativeEncoderPool.Get().(*encoder)
-	defer nativeEncoderPool.Put(enc)
-
-	enc.Reset()
+func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) {
+	var b Builder
 
-	var err error
 	if key != nil {
-		keyID, err = enc.Add(key)
+		keyID, err = b.Add(key)
 		if err != nil {
-			return nil, 0, 0, fmt.Errorf("adding map key to BTF encoder: %w", err)
+			return nil, 0, 0, fmt.Errorf("add key type: %w", err)
 		}
 	}
 
 	if value != nil {
-		valueID, err = enc.Add(value)
+		valueID, err = b.Add(value)
 		if err != nil {
-			return nil, 0, 0, fmt.Errorf("adding map value to BTF encoder: %w", err)
+			return nil, 0, 0, fmt.Errorf("add value type: %w", err)
 		}
 	}
 
-	btf, err := enc.Encode()
-	if err != nil {
-		return nil, 0, 0, fmt.Errorf("marshal BTF: %w", err)
-	}
-
-	handle, err := newHandleFromRawBTF(btf)
+	handle, err := NewHandle(&b)
 	if err != nil {
 		// Check for 'full' map BTF support, since kernels between 4.18 and 5.2
 		// already support BTF blobs for maps without Var or Datasec just fine.
@@ -417,6 +539,5 @@ func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, _ error) {
 			return nil, 0, 0, err
 		}
 	}
-
 	return handle, keyID, valueID, err
 }
diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go
index deeaeaca..bc6aff28 100644
--- a/vendor/github.com/cilium/ebpf/btf/strings.go
+++ b/vendor/github.com/cilium/ebpf/btf/strings.go
@@ -7,6 +7,8 @@ import (
 	"fmt"
 	"io"
 	"strings"
+
+	"golang.org/x/exp/maps"
 )
 
 type stringTable struct {
@@ -89,15 +91,6 @@ func (st *stringTable) lookup(offset uint32) (string, error) {
 	return st.strings[i], nil
 }
 
-func (st *stringTable) Length() int {
-	if len(st.offsets) == 0 || len(st.strings) == 0 {
-		return 0
-	}
-
-	last := len(st.offsets) - 1
-	return int(st.offsets[last]) + len(st.strings[last]) + 1
-}
-
 func (st *stringTable) Marshal(w io.Writer) error {
 	for _, str := range st.strings {
 		_, err := io.WriteString(w, str)
@@ -112,6 +105,11 @@ func (st *stringTable) Marshal(w io.Writer) error {
 	return nil
 }
 
+// Num returns the number of strings in the table.
+func (st *stringTable) Num() int {
+	return len(st.strings)
+}
+
 // search is a copy of sort.Search specialised for uint32.
 //
 // Licensed under https://go.dev/LICENSE
@@ -141,25 +139,19 @@ type stringTableBuilder struct {
 // newStringTableBuilder creates a builder with the given capacity.
 //
 // capacity may be zero.
-func newStringTableBuilder() *stringTableBuilder {
-	stb := &stringTableBuilder{0, make(map[string]uint32)}
-	// Ensure that the empty string is at index 0.
-	stb.append("")
-	return stb
-}
+func newStringTableBuilder(capacity int) *stringTableBuilder {
+	var stb stringTableBuilder
 
-// newStringTableBuilderFromTable creates a new builder from an existing string table.
-func newStringTableBuilderFromTable(contents *stringTable) *stringTableBuilder {
-	stb := &stringTableBuilder{0, make(map[string]uint32, len(contents.strings)+1)}
-	stb.append("")
-
-	for _, str := range contents.strings {
-		if str != "" {
-			stb.append(str)
-		}
+	if capacity == 0 {
+		// Use the runtime's small default size.
+		stb.strings = make(map[string]uint32)
+	} else {
+		stb.strings = make(map[string]uint32, capacity)
 	}
 
-	return stb
+	// Ensure that the empty string is at index 0.
+	stb.append("")
+	return &stb
 }
 
 // Add a string to the table.
@@ -195,7 +187,6 @@ func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
 	}
 
 	return offset, nil
-
 }
 
 // Length returns the length in bytes.
@@ -203,19 +194,21 @@ func (stb *stringTableBuilder) Length() int {
 	return int(stb.length)
 }
 
-// Marshal a string table into its binary representation.
-func (stb *stringTableBuilder) Marshal() []byte {
-	buf := make([]byte, stb.Length())
-	stb.MarshalBuffer(buf)
+// AppendEncoded appends the string table to the end of the provided buffer.
+func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
+	n := len(buf)
+	buf = append(buf, make([]byte, stb.Length())...)
+	strings := buf[n:]
+	for str, offset := range stb.strings {
+		copy(strings[offset:], str)
+	}
 	return buf
 }
 
-// Marshal a string table into a pre-allocated buffer.
-//
-// The buffer must be at least of size Length().
-func (stb *stringTableBuilder) MarshalBuffer(buf []byte) {
-	for str, offset := range stb.strings {
-		n := copy(buf[offset:], str)
-		buf[offset+uint32(n)] = 0
+// Copy the string table builder.
+func (stb *stringTableBuilder) Copy() *stringTableBuilder {
+	return &stringTableBuilder{
+		stb.length,
+		maps.Clone(stb.strings),
 	}
 }
diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go
index fa42815f..a3a9dec9 100644
--- a/vendor/github.com/cilium/ebpf/btf/traversal.go
+++ b/vendor/github.com/cilium/ebpf/btf/traversal.go
@@ -15,7 +15,7 @@ type postorderIterator struct {
 	// The root type. May be nil if skip(root) is true.
 	root Type
 
-	// Contains types which need to be either walked or passed to the callback.
+	// Contains types which need to be either walked or yielded.
 	types typeDeque
 	// Contains a boolean whether the type has been walked or not.
 	walked internal.Deque[bool]
@@ -26,9 +26,8 @@ type postorderIterator struct {
 	Type Type
 }
 
-// postorderTraversal calls fn for all types reachable from root.
-//
-// fn is invoked on children of root before root itself.
+// postorderTraversal iterates all types reachable from root by visiting the
+// leaves of the graph first.
 //
 // Types for which skip returns true are ignored. skip may be nil.
 func postorderTraversal(root Type, skip func(Type) (skip bool)) postorderIterator {
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
index e344bbdc..68d4a175 100644
--- a/vendor/github.com/cilium/ebpf/btf/types.go
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -1,6 +1,7 @@
 package btf
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -9,14 +10,26 @@ import (
 
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
 )
 
 const maxTypeDepth = 32
 
 // TypeID identifies a type in a BTF section.
-type TypeID uint32
+type TypeID = sys.TypeID
 
 // Type represents a type described by BTF.
+//
+// Identity of Type follows the [Go specification]: two Types are considered
+// equal if they have the same concrete type and the same dynamic value, aka
+// they point at the same location in memory. This means that the following
+// Types are considered distinct even though they have the same "shape".
+//
+//	a := &Int{Size: 1}
+//	b := &Int{Size: 1}
+//	a != b
+//
+// [Go specification]: https://go.dev/ref/spec#Comparison_operators
 type Type interface {
 	// Type can be formatted using the %s and %v verbs. %s outputs only the
 	// identity of the type, without any detail. %v outputs additional detail.
@@ -55,18 +68,6 @@ var (
 	_ Type = (*cycle)(nil)
 )
 
-// types is a list of Type.
-//
-// The order determines the ID of a type.
-type types []Type
-
-func (ts types) ByID(id TypeID) (Type, error) {
-	if int(id) > len(ts) {
-		return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound)
-	}
-	return ts[id], nil
-}
-
 // Void is the unit type of BTF.
 type Void struct{}
 
@@ -218,6 +219,7 @@ func copyMembers(orig []Member) []Member {
 }
 
 type composite interface {
+	Type
 	members() []Member
 }
 
@@ -592,6 +594,8 @@ var (
 	_ qualifier = (*typeTag)(nil)
 )
 
+var errUnsizedType = errors.New("type is unsized")
+
 // Sizeof returns the size of a type in bytes.
 //
 // Returns an error if the size can't be computed.
@@ -626,7 +630,7 @@ func Sizeof(typ Type) (int, error) {
 			continue
 
 		default:
-			return 0, fmt.Errorf("unsized type %T", typ)
+			return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType)
 		}
 
 		if n > 0 && elem > math.MaxInt64/n {
@@ -646,16 +650,33 @@ func Sizeof(typ Type) (int, error) {
 
 // alignof returns the alignment of a type.
 //
-// Currently only supports the subset of types necessary for bitfield relocations.
+// Returns an error if the Type can't be aligned, like an integer with an uneven
+// size. Currently only supports the subset of types necessary for bitfield
+// relocations.
 func alignof(typ Type) (int, error) {
+	var n int
+
 	switch t := UnderlyingType(typ).(type) {
 	case *Enum:
-		return int(t.size()), nil
+		n = int(t.size())
 	case *Int:
-		return int(t.Size), nil
+		n = int(t.Size)
+	case *Array:
+		return alignof(t.Type)
 	default:
 		return 0, fmt.Errorf("can't calculate alignment of %T", t)
 	}
+
+	if !pow(n) {
+		return 0, fmt.Errorf("alignment value %d is not a power of two", n)
+	}
+
+	return n, nil
+}
+
+// pow returns true if n is a power of two.
+func pow(n int) bool {
+	return n != 0 && (n&(n-1)) == 0
 }
 
 // Transformer modifies a given Type and returns the result.
@@ -669,7 +690,7 @@ type Transformer func(Type) Type
 // typ may form a cycle. If transform is not nil, it is called with the
 // to be copied type, and the returned value is copied instead.
 func Copy(typ Type, transform Transformer) Type {
-	copies := make(copier)
+	copies := copier{copies: make(map[Type]Type)}
 	copies.copy(&typ, transform)
 	return typ
 }
@@ -681,7 +702,7 @@ func copyTypes(types []Type, transform Transformer) []Type {
 	result := make([]Type, len(types))
 	copy(result, types)
 
-	copies := make(copier)
+	copies := copier{copies: make(map[Type]Type, len(types))}
 	for i := range result {
 		copies.copy(&result[i], transform)
 	}
@@ -689,13 +710,15 @@ func copyTypes(types []Type, transform Transformer) []Type {
 	return result
 }
 
-type copier map[Type]Type
+type copier struct {
+	copies map[Type]Type
+	work   typeDeque
+}
 
-func (c copier) copy(typ *Type, transform Transformer) {
-	var work typeDeque
-	for t := typ; t != nil; t = work.Pop() {
+func (c *copier) copy(typ *Type, transform Transformer) {
+	for t := typ; t != nil; t = c.work.Pop() {
 		// *t is the identity of the type.
-		if cpy := c[*t]; cpy != nil {
+		if cpy := c.copies[*t]; cpy != nil {
 			*t = cpy
 			continue
 		}
@@ -707,11 +730,11 @@ func (c copier) copy(typ *Type, transform Transformer) {
 			cpy = (*t).copy()
 		}
 
-		c[*t] = cpy
+		c.copies[*t] = cpy
 		*t = cpy
 
 		// Mark any nested types for copying.
-		walkType(cpy, work.Push)
+		walkType(cpy, c.work.Push)
 	}
 }
 
@@ -720,23 +743,28 @@ type typeDeque = internal.Deque[*Type]
 // inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
 // it into a graph of Types connected via pointers.
 //
-// If baseTypes are provided, then the raw types are
-// considered to be of a split BTF (e.g., a kernel module).
+// If base is provided, then the raw types are considered to be of a split BTF
+// (e.g., a kernel module).
 //
-// Returns  a slice of types indexed by TypeID. Since BTF ignores compilation
+// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
 // units, multiple types may share the same name. A Type may form a cyclic graph
 // by pointing at itself.
-func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTable) ([]Type, error) {
+func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([]Type, error) {
 	types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types
 
-	typeIDOffset := TypeID(1) // Void is TypeID(0), so the rest starts from TypeID(1)
+	// Void is defined to always be type ID 0, and is thus omitted from BTF.
+	types = append(types, (*Void)(nil))
 
-	if baseTypes == nil {
-		// Void is defined to always be type ID 0, and is thus omitted from BTF.
-		types = append(types, (*Void)(nil))
-	} else {
-		// For split BTF, the next ID is max base BTF type ID + 1
-		typeIDOffset = TypeID(len(baseTypes))
+	firstTypeID := TypeID(0)
+	if base != nil {
+		var err error
+		firstTypeID, err = base.nextTypeID()
+		if err != nil {
+			return nil, err
+		}
+
+		// Split BTF doesn't contain Void.
+		types = types[:0]
 	}
 
 	type fixupDef struct {
@@ -746,20 +774,20 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
 
 	var fixups []fixupDef
 	fixup := func(id TypeID, typ *Type) bool {
-		if id < TypeID(len(baseTypes)) {
-			*typ = baseTypes[id]
-			return true
+		if id < firstTypeID {
+			if baseType, err := base.TypeByID(id); err == nil {
+				*typ = baseType
+				return true
+			}
 		}
 
-		idx := id
-		if baseTypes != nil {
-			idx = id - TypeID(len(baseTypes))
-		}
-		if idx < TypeID(len(types)) {
+		idx := int(id - firstTypeID)
+		if idx < len(types) {
 			// We've already inflated this type, fix it up immediately.
 			*typ = types[idx]
 			return true
 		}
+
 		fixups = append(fixups, fixupDef{id, typ})
 		return false
 	}
@@ -849,12 +877,16 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
 	}
 
 	var declTags []*declTag
-	for i, raw := range rawTypes {
+	for _, raw := range rawTypes {
 		var (
-			id  = typeIDOffset + TypeID(i)
+			id  = firstTypeID + TypeID(len(types))
 			typ Type
 		)
 
+		if id < firstTypeID {
+			return nil, fmt.Errorf("no more type IDs")
+		}
+
 		name, err := rawStrings.Lookup(raw.NameOff)
 		if err != nil {
 			return nil, fmt.Errorf("get name for type id %d: %w", id, err)
@@ -1024,19 +1056,20 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
 	}
 
 	for _, fixup := range fixups {
-		i := int(fixup.id)
-		if i >= len(types)+len(baseTypes) {
-			return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+		if fixup.id < firstTypeID {
+			return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id)
 		}
-		if i < len(baseTypes) {
-			return nil, fmt.Errorf("fixup for base type id %d is not expected", i)
+
+		idx := int(fixup.id - firstTypeID)
+		if idx >= len(types) {
+			return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
 		}
 
-		*fixup.typ = types[i-len(baseTypes)]
+		*fixup.typ = types[idx]
 	}
 
 	for _, bitfieldFixup := range bitfieldFixups {
-		if bitfieldFixup.id < TypeID(len(baseTypes)) {
+		if bitfieldFixup.id < firstTypeID {
 			return nil, fmt.Errorf("bitfield fixup from split to base types is not expected")
 		}
 
@@ -1116,6 +1149,29 @@ func UnderlyingType(typ Type) Type {
 	return &cycle{typ}
 }
 
+// as returns typ if is of type T. Otherwise it peels qualifiers and Typedefs
+// until it finds a T.
+//
+// Returns the zero value and false if there is no T or if the type is nested
+// too deeply.
+func as[T Type](typ Type) (T, bool) {
+	for depth := 0; depth <= maxTypeDepth; depth++ {
+		switch v := (typ).(type) {
+		case T:
+			return v, true
+		case qualifier:
+			typ = v.qualify()
+		case *Typedef:
+			typ = v.Type
+		default:
+			goto notFound
+		}
+	}
+notFound:
+	var zero T
+	return zero, false
+}
+
 type formatState struct {
 	fmt.State
 	depth int
@@ -1138,10 +1194,7 @@ func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{})
 		return
 	}
 
-	// This is the same as %T, but elides the package name. Assumes that
-	// formattableType is implemented by a pointer receiver.
-	goTypeName := reflect.TypeOf(t).Elem().Name()
-	_, _ = io.WriteString(f, goTypeName)
+	_, _ = io.WriteString(f, internal.GoTypeName(t))
 
 	if name := t.TypeName(); name != "" {
 		// Output BTF type name if present.
diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go
new file mode 100644
index 00000000..12a89b87
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go
@@ -0,0 +1,26 @@
+package btf
+
+// datasecResolveWorkaround ensures that certain vars in a Datasec are added
+// to a Spec before the Datasec. This avoids a bug in kernel BTF validation.
+//
+// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/
+func datasecResolveWorkaround(b *Builder, ds *Datasec) error {
+	for _, vsi := range ds.Vars {
+		v, ok := vsi.Type.(*Var)
+		if !ok {
+			continue
+		}
+
+		switch v.Type.(type) {
+		case *Typedef, *Volatile, *Const, *Restrict, *typeTag:
+			// NB: We must never call Add on a Datasec, otherwise we risk
+			// infinite recursion.
+			_, err := b.Add(v.Type)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go
index 729e5e9d..fb720beb 100644
--- a/vendor/github.com/cilium/ebpf/collection.go
+++ b/vendor/github.com/cilium/ebpf/collection.go
@@ -9,6 +9,8 @@ import (
 
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/btf"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/kconfig"
 )
 
 // CollectionOptions control loading a collection into the kernel.
@@ -107,6 +109,16 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
 	return nil
 }
 
+// MissingConstantsError is returned by [CollectionSpec.RewriteConstants].
+type MissingConstantsError struct {
+	// The constants missing from .rodata.
+	Constants []string
+}
+
+func (m *MissingConstantsError) Error() string {
+	return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", "))
+}
+
 // RewriteConstants replaces the value of multiple constants.
 //
 // The constant must be defined like so in the C program:
@@ -120,7 +132,7 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
 //
 // From Linux 5.5 the verifier will use constants to eliminate dead code.
 //
-// Returns an error if a constant doesn't exist.
+// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist.
 func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
 	replaced := make(map[string]bool)
 
@@ -184,7 +196,7 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error
 	}
 
 	if len(missing) != 0 {
-		return fmt.Errorf("spec is missing one or more constants: %s", strings.Join(missing, ","))
+		return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing})
 	}
 
 	return nil
@@ -565,6 +577,95 @@ func (cl *collectionLoader) populateMaps() error {
 	return nil
 }
 
+// resolveKconfig resolves all variables declared in .kconfig and populates
+// m.Contents. Does nothing if the given m.Contents is non-empty.
+func resolveKconfig(m *MapSpec) error {
+	ds, ok := m.Value.(*btf.Datasec)
+	if !ok {
+		return errors.New("map value is not a Datasec")
+	}
+
+	type configInfo struct {
+		offset uint32
+		typ    btf.Type
+	}
+
+	configs := make(map[string]configInfo)
+
+	data := make([]byte, ds.Size)
+	for _, vsi := range ds.Vars {
+		v := vsi.Type.(*btf.Var)
+		n := v.TypeName()
+
+		switch n {
+		case "LINUX_KERNEL_VERSION":
+			if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 {
+				return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type)
+			}
+
+			kv, err := internal.KernelVersion()
+			if err != nil {
+				return fmt.Errorf("getting kernel version: %w", err)
+			}
+			internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel())
+
+		case "LINUX_HAS_SYSCALL_WRAPPER":
+			if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 {
+				return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type)
+			}
+			var value uint32 = 1
+			if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) {
+				value = 0
+			} else if err != nil {
+				return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err)
+			}
+
+			internal.NativeEndian.PutUint32(data[vsi.Offset:], value)
+
+		default: // Catch CONFIG_*.
+			configs[n] = configInfo{
+				offset: vsi.Offset,
+				typ:    v.Type,
+			}
+		}
+	}
+
+	// We only parse kconfig file if a CONFIG_* variable was found.
+	if len(configs) > 0 {
+		f, err := kconfig.Find()
+		if err != nil {
+			return fmt.Errorf("cannot find a kconfig file: %w", err)
+		}
+		defer f.Close()
+
+		filter := make(map[string]struct{}, len(configs))
+		for config := range configs {
+			filter[config] = struct{}{}
+		}
+
+		kernelConfig, err := kconfig.Parse(f, filter)
+		if err != nil {
+			return fmt.Errorf("cannot parse kconfig file: %w", err)
+		}
+
+		for n, info := range configs {
+			value, ok := kernelConfig[n]
+			if !ok {
+				return fmt.Errorf("config option %q does not exists for this kernel", n)
+			}
+
+			err := kconfig.PutValue(data[info.offset:], info.typ, value)
+			if err != nil {
+				return fmt.Errorf("problem adding value for %s: %w", n, err)
+			}
+		}
+	}
+
+	m.Contents = []MapKV{{uint32(0), data}}
+
+	return nil
+}
+
 // LoadCollection reads an object file and creates and loads its declared
 // resources into the kernel.
 //
diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go
index 9dc87787..8d92672e 100644
--- a/vendor/github.com/cilium/ebpf/elf_reader.go
+++ b/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -18,6 +18,15 @@ import (
 	"github.com/cilium/ebpf/internal/unix"
 )
 
+type kconfigMetaKey struct{}
+
+type kconfigMeta struct {
+	Map    *MapSpec
+	Offset uint32
+}
+
+type kfuncMeta struct{}
+
 // elfCode is a convenience to reduce the amount of arguments that have to
 // be passed around explicitly. You should treat its contents as immutable.
 type elfCode struct {
@@ -27,6 +36,9 @@ type elfCode struct {
 	version  uint32
 	btf      *btf.Spec
 	extInfo  *btf.ExtInfos
+	maps     map[string]*MapSpec
+	kfuncs   map[string]*btf.Func
+	kconfig  *MapSpec
 }
 
 // LoadCollectionSpec parses an ELF file into a CollectionSpec.
@@ -113,6 +125,8 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
 		version:     version,
 		btf:         btfSpec,
 		extInfo:     btfExtInfo,
+		maps:        make(map[string]*MapSpec),
+		kfuncs:      make(map[string]*btf.Func),
 	}
 
 	symbols, err := f.Symbols()
@@ -126,27 +140,33 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
 		return nil, fmt.Errorf("load relocations: %w", err)
 	}
 
-	// Collect all the various ways to define maps.
-	maps := make(map[string]*MapSpec)
-	if err := ec.loadMaps(maps); err != nil {
+	if err := ec.loadMaps(); err != nil {
 		return nil, fmt.Errorf("load maps: %w", err)
 	}
 
-	if err := ec.loadBTFMaps(maps); err != nil {
+	if err := ec.loadBTFMaps(); err != nil {
 		return nil, fmt.Errorf("load BTF maps: %w", err)
 	}
 
-	if err := ec.loadDataSections(maps); err != nil {
+	if err := ec.loadDataSections(); err != nil {
 		return nil, fmt.Errorf("load data sections: %w", err)
 	}
 
+	if err := ec.loadKconfigSection(); err != nil {
+		return nil, fmt.Errorf("load virtual .kconfig section: %w", err)
+	}
+
+	if err := ec.loadKsymsSection(); err != nil {
+		return nil, fmt.Errorf("load virtual .ksyms section: %w", err)
+	}
+
 	// Finally, collect programs and link them.
 	progs, err := ec.loadProgramSections()
 	if err != nil {
 		return nil, fmt.Errorf("load programs: %w", err)
 	}
 
-	return &CollectionSpec{maps, progs, btfSpec, ec.ByteOrder}, nil
+	return &CollectionSpec{ec.maps, progs, btfSpec, ec.ByteOrder}, nil
 }
 
 func loadLicense(sec *elf.Section) (string, error) {
@@ -566,6 +586,10 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 			return fmt.Errorf("neither a call nor a load instruction: %v", ins)
 		}
 
+	// The Undefined section is used for 'virtual' symbols that aren't backed by
+	// an ELF section. This includes symbol references from inline asm, forward
+	// function declarations, as well as extern kfunc declarations using __ksym
+	// and extern kconfig variables declared using __kconfig.
 	case undefSection:
 		if bind != elf.STB_GLOBAL {
 			return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
@@ -575,7 +599,36 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 			return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
 		}
 
-		// There is nothing to do here but set ins.Reference.
+		kf := ec.kfuncs[name]
+		switch {
+		// If a Call instruction is found and the datasec has a btf.Func with a Name
+		// that matches the symbol name we mark the instruction as a call to a kfunc.
+		case kf != nil && ins.OpCode.JumpOp() == asm.Call:
+			ins.Metadata.Set(kfuncMeta{}, kf)
+			ins.Src = asm.PseudoKfuncCall
+			ins.Constant = -1
+
+		// If no kconfig map is found, this must be a symbol reference from inline
+		// asm (see testdata/loader.c:asm_relocation()) or a call to a forward
+		// function declaration (see testdata/fwd_decl.c). Don't interfere, These
+		// remain standard symbol references.
+		// extern __kconfig reads are represented as dword loads that need to be
+		// rewritten to pseudo map loads from .kconfig. If the map is present,
+		// require it to contain the symbol to disambiguate between inline asm
+		// relos and kconfigs.
+		case ec.kconfig != nil && ins.OpCode.IsDWordLoad():
+			for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars {
+				if vsi.Type.(*btf.Var).Name != rel.Name {
+					continue
+				}
+
+				ins.Src = asm.PseudoMapValue
+				ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset})
+				return nil
+			}
+
+			return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name)
+		}
 
 	default:
 		return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
@@ -585,7 +638,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 	return nil
 }
 
-func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
+func (ec *elfCode) loadMaps() error {
 	for _, sec := range ec.sections {
 		if sec.kind != mapSection {
 			continue
@@ -611,7 +664,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
 			}
 
 			mapName := mapSym.Name
-			if maps[mapName] != nil {
+			if ec.maps[mapName] != nil {
 				return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
 			}
 
@@ -645,7 +698,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
 				return fmt.Errorf("map %s: %w", mapName, err)
 			}
 
-			maps[mapName] = &spec
+			ec.maps[mapName] = &spec
 		}
 	}
 
@@ -655,7 +708,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
 // loadBTFMaps iterates over all ELF sections marked as BTF map sections
 // (like .maps) and parses them into MapSpecs. Dump the .maps section and
 // any relocations with `readelf -x .maps -r <elf_file>`.
-func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
+func (ec *elfCode) loadBTFMaps() error {
 	for _, sec := range ec.sections {
 		if sec.kind != btfMapSection {
 			continue
@@ -694,7 +747,7 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
 				return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported)
 			}
 
-			if maps[name] != nil {
+			if ec.maps[name] != nil {
 				return fmt.Errorf("section %v: map %s already exists", sec.Name, name)
 			}
 
@@ -713,7 +766,7 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
 				return fmt.Errorf("map %v: %w", name, err)
 			}
 
-			maps[name] = mapSpec
+			ec.maps[name] = mapSpec
 		}
 
 		// Drain the ELF section reader to make sure all bytes are accounted for
@@ -1001,14 +1054,14 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem
 		case elf.STT_OBJECT:
 			contents = append(contents, MapKV{uint32(k), r.Name})
 		default:
-			return nil, fmt.Errorf("unknown relocation type %v", t)
+			return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name)
 		}
 	}
 
 	return contents, nil
 }
 
-func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
+func (ec *elfCode) loadDataSections() error {
 	for _, sec := range ec.sections {
 		if sec.kind != dataSection {
 			continue
@@ -1065,8 +1118,68 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
 			mapSpec.Freeze = true
 		}
 
-		maps[sec.Name] = mapSpec
+		ec.maps[sec.Name] = mapSpec
 	}
+
+	return nil
+}
+
+// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKconfigSection() error {
+	if ec.btf == nil {
+		return nil
+	}
+
+	var ds *btf.Datasec
+	err := ec.btf.TypeByName(".kconfig", &ds)
+	if errors.Is(err, btf.ErrNotFound) {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	if ds.Size == 0 {
+		return errors.New("zero-length .kconfig")
+	}
+
+	ec.kconfig = &MapSpec{
+		Name:       ".kconfig",
+		Type:       Array,
+		KeySize:    uint32(4),
+		ValueSize:  ds.Size,
+		MaxEntries: 1,
+		Flags:      unix.BPF_F_RDONLY_PROG | unix.BPF_F_MMAPABLE,
+		Freeze:     true,
+		Key:        &btf.Int{Size: 4},
+		Value:      ds,
+	}
+
+	return nil
+}
+
+// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKsymsSection() error {
+	if ec.btf == nil {
+		return nil
+	}
+
+	var ds *btf.Datasec
+	err := ec.btf.TypeByName(".ksyms", &ds)
+	if errors.Is(err, btf.ErrNotFound) {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	for _, v := range ds.Vars {
+		// we have already checked the .ksyms Datasec to only contain Func Vars.
+		ec.kfuncs[v.Type.TypeName()] = v.Type.(*btf.Func)
+	}
+
 	return nil
 }
 
@@ -1108,8 +1221,11 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
 		{"iter/", Tracing, AttachTraceIter, 0},
 		{"iter.s/", Tracing, AttachTraceIter, unix.BPF_F_SLEEPABLE},
 		{"syscall", Syscall, AttachNone, 0},
+		{"xdp.frags_devmap/", XDP, AttachXDPDevMap, unix.BPF_F_XDP_HAS_FRAGS},
 		{"xdp_devmap/", XDP, AttachXDPDevMap, 0},
+		{"xdp.frags_cpumap/", XDP, AttachXDPCPUMap, unix.BPF_F_XDP_HAS_FRAGS},
 		{"xdp_cpumap/", XDP, AttachXDPCPUMap, 0},
+		{"xdp.frags", XDP, AttachNone, unix.BPF_F_XDP_HAS_FRAGS},
 		{"xdp", XDP, AttachNone, 0},
 		{"perf_event", PerfEvent, AttachNone, 0},
 		{"lwt_in", LWTIn, AttachNone, 0},
diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go
index 8f42173a..a02e8a41 100644
--- a/vendor/github.com/cilium/ebpf/info.go
+++ b/vendor/github.com/cilium/ebpf/info.go
@@ -94,8 +94,10 @@ type ProgramInfo struct {
 	// Name as supplied by user space at load time. Available from 4.15.
 	Name string
 
-	btf   btf.ID
-	stats *programStats
+	createdByUID     uint32
+	haveCreatedByUID bool
+	btf              btf.ID
+	stats            *programStats
 
 	maps  []MapID
 	insns []byte
@@ -130,6 +132,18 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
 		pi.maps = make([]MapID, info.NrMapIds)
 		info2.NrMapIds = info.NrMapIds
 		info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0]))
+	} else if haveProgramInfoMapIDs() == nil {
+		// This program really has no associated maps.
+		pi.maps = make([]MapID, 0)
+	} else {
+		// The kernel doesn't report associated maps.
+		pi.maps = nil
+	}
+
+	// createdByUID and NrMapIds were introduced in the same kernel version.
+	if pi.maps != nil {
+		pi.createdByUID = info.CreatedByUid
+		pi.haveCreatedByUID = true
 	}
 
 	if info.XlatedProgLen > 0 {
@@ -175,6 +189,15 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) {
 	return pi.id, pi.id > 0
 }
 
+// CreatedByUID returns the Uid that created the program.
+//
+// Available from 4.15.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) CreatedByUID() (uint32, bool) {
+	return pi.createdByUID, pi.haveCreatedByUID
+}
+
 // BTFID returns the BTF ID associated with the program.
 //
 // The ID is only valid as long as the associated program is kept alive.
@@ -321,3 +344,30 @@ func EnableStats(which uint32) (io.Closer, error) {
 	}
 	return fd, nil
 }
+
+var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", "4.15", func() error {
+	prog, err := progLoad(asm.Instructions{
+		asm.LoadImm(asm.R0, 0, asm.DWord),
+		asm.Return(),
+	}, SocketFilter, "MIT")
+	if err != nil {
+		return err
+	}
+	defer prog.Close()
+
+	err = sys.ObjInfo(prog, &sys.ProgInfo{
+		// NB: Don't need to allocate MapIds since the program isn't using
+		// any maps.
+		NrMapIds: 1,
+	})
+	if errors.Is(err, unix.EINVAL) {
+		// Most likely the syscall doesn't exist.
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.E2BIG) {
+		// We've hit check_uarg_tail_zero on older kernels.
+		return internal.ErrNotSupported
+	}
+
+	return err
+})
diff --git a/vendor/github.com/cilium/ebpf/internal/align.go b/vendor/github.com/cilium/ebpf/internal/align.go
index 8b4f2658..edc898fa 100644
--- a/vendor/github.com/cilium/ebpf/internal/align.go
+++ b/vendor/github.com/cilium/ebpf/internal/align.go
@@ -1,6 +1,8 @@
 package internal
 
+import "golang.org/x/exp/constraints"
+
 // Align returns 'n' updated to 'alignment' boundary.
-func Align(n, alignment int) int {
-	return (int(n) + alignment - 1) / alignment * alignment
+func Align[I constraints.Integer](n, alignment I) I {
+	return (n + alignment - 1) / alignment * alignment
 }
diff --git a/vendor/github.com/cilium/ebpf/internal/buffer.go b/vendor/github.com/cilium/ebpf/internal/buffer.go
new file mode 100644
index 00000000..81c65443
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/buffer.go
@@ -0,0 +1,31 @@
+package internal
+
+import (
+	"bytes"
+	"sync"
+)
+
+var bytesBufferPool = sync.Pool{
+	New: func() interface{} {
+		return new(bytes.Buffer)
+	},
+}
+
+// NewBuffer retrieves a [bytes.Buffer] from a pool and re-initialises it.
+//
+// The returned buffer should be passed to [PutBuffer].
+func NewBuffer(buf []byte) *bytes.Buffer {
+	wr := bytesBufferPool.Get().(*bytes.Buffer)
+	// Reinitialize the Buffer with a new backing slice since it is returned to
+	// the caller by wr.Bytes() below. Pooling is faster despite calling
+	// NewBuffer. The pooled alloc is still reused, it only needs to be zeroed.
+	*wr = *bytes.NewBuffer(buf)
+	return wr
+}
+
+// PutBuffer releases a buffer to the pool.
+func PutBuffer(buf *bytes.Buffer) {
+	// Release reference to the backing buffer.
+	*buf = *bytes.NewBuffer(nil)
+	bytesBufferPool.Put(buf)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go
index 3affa1ef..9e908b61 100644
--- a/vendor/github.com/cilium/ebpf/internal/cpu.go
+++ b/vendor/github.com/cilium/ebpf/internal/cpu.go
@@ -4,24 +4,13 @@ import (
 	"fmt"
 	"os"
 	"strings"
-	"sync"
 )
 
-var sysCPU struct {
-	once sync.Once
-	err  error
-	num  int
-}
-
 // PossibleCPUs returns the max number of CPUs a system may possibly have
 // Logical CPU numbers must be of the form 0-n
-func PossibleCPUs() (int, error) {
-	sysCPU.once.Do(func() {
-		sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible")
-	})
-
-	return sysCPU.num, sysCPU.err
-}
+var PossibleCPUs = Memoize(func() (int, error) {
+	return parseCPUsFromFile("/sys/devices/system/cpu/possible")
+})
 
 func parseCPUsFromFile(path string) (int, error) {
 	spec, err := os.ReadFile(path)
diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go
index 05be23e6..e3a30502 100644
--- a/vendor/github.com/cilium/ebpf/internal/deque.go
+++ b/vendor/github.com/cilium/ebpf/internal/deque.go
@@ -24,24 +24,11 @@ func (dq *Deque[T]) Empty() bool {
 	return dq.read == dq.write
 }
 
-func (dq *Deque[T]) remainingCap() int {
-	return len(dq.elems) - int(dq.write-dq.read)
-}
-
 // Push adds an element to the end.
 func (dq *Deque[T]) Push(e T) {
-	if dq.remainingCap() >= 1 {
-		dq.elems[dq.write&dq.mask] = e
-		dq.write++
-		return
-	}
-
-	elems := dq.linearise(1)
-	elems = append(elems, e)
-
-	dq.elems = elems[:cap(elems)]
-	dq.mask = uint64(cap(elems)) - 1
-	dq.read, dq.write = 0, uint64(len(elems))
+	dq.Grow(1)
+	dq.elems[dq.write&dq.mask] = e
+	dq.write++
 }
 
 // Shift returns the first element or the zero value.
@@ -74,16 +61,17 @@ func (dq *Deque[T]) Pop() T {
 	return t
 }
 
-// linearise the contents of the deque.
-//
-// The returned slice has space for at least n more elements and has power
-// of two capacity.
-func (dq *Deque[T]) linearise(n int) []T {
-	length := dq.write - dq.read
-	need := length + uint64(n)
-	if need < length {
+// Grow the deque's capacity, if necessary, to guarantee space for another n
+// elements.
+func (dq *Deque[T]) Grow(n int) {
+	have := dq.write - dq.read
+	need := have + uint64(n)
+	if need < have {
 		panic("overflow")
 	}
+	if uint64(len(dq.elems)) >= need {
+		return
+	}
 
 	// Round up to the new power of two which is at least 8.
 	// See https://jameshfisher.com/2018/03/30/round-up-power-2/
@@ -92,9 +80,12 @@ func (dq *Deque[T]) linearise(n int) []T {
 		capacity = 8
 	}
 
-	types := make([]T, length, capacity)
+	elems := make([]T, have, capacity)
 	pivot := dq.read & dq.mask
-	copied := copy(types, dq.elems[pivot:])
-	copy(types[copied:], dq.elems[:pivot])
-	return types
+	copied := copy(elems, dq.elems[pivot:])
+	copy(elems[copied:], dq.elems[:pivot])
+
+	dq.elems = elems[:capacity]
+	dq.mask = uint64(capacity) - 1
+	dq.read, dq.write = 0, have
 }
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go
index ad33cda8..96a2ac0d 100644
--- a/vendor/github.com/cilium/ebpf/internal/endian_be.go
+++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go
@@ -1,5 +1,4 @@
 //go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
-// +build armbe arm64be mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64
 
 package internal
 
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go
index 41a68224..fde4c55a 100644
--- a/vendor/github.com/cilium/ebpf/internal/endian_le.go
+++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go
@@ -1,5 +1,4 @@
-//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
-// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv64
+//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
 
 package internal
 
diff --git a/vendor/github.com/cilium/ebpf/internal/epoll/poller.go b/vendor/github.com/cilium/ebpf/internal/epoll/poller.go
index 689adb83..48d9c1f7 100644
--- a/vendor/github.com/cilium/ebpf/internal/epoll/poller.go
+++ b/vendor/github.com/cilium/ebpf/internal/epoll/poller.go
@@ -173,7 +173,7 @@ type temporaryError interface {
 	Temporary() bool
 }
 
-// waitWait unblocks Wait if it's epoll_wait.
+// wakeWait unblocks Wait if it's epoll_wait.
 func (p *Poller) wakeWait() error {
 	p.eventMu.Lock()
 	defer p.eventMu.Unlock()
diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go
index 30b6641f..1eaf4775 100644
--- a/vendor/github.com/cilium/ebpf/internal/io.go
+++ b/vendor/github.com/cilium/ebpf/internal/io.go
@@ -2,10 +2,14 @@ package internal
 
 import (
 	"bufio"
+	"bytes"
 	"compress/gzip"
 	"errors"
+	"fmt"
 	"io"
 	"os"
+	"path/filepath"
+	"sync"
 )
 
 // NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
@@ -60,3 +64,65 @@ func ReadAllCompressed(file string) ([]byte, error) {
 
 	return io.ReadAll(gz)
 }
+
+// ReadUint64FromFile reads a uint64 from a file.
+//
+// format specifies the contents of the file in fmt.Scanf syntax.
+func ReadUint64FromFile(format string, path ...string) (uint64, error) {
+	filename := filepath.Join(path...)
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return 0, fmt.Errorf("reading file %q: %w", filename, err)
+	}
+
+	var value uint64
+	n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
+	if err != nil {
+		return 0, fmt.Errorf("parsing file %q: %w", filename, err)
+	}
+	if n != 1 {
+		return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
+	}
+
+	return value, nil
+}
+
+type uint64FromFileKey struct {
+	format, path string
+}
+
+var uint64FromFileCache = struct {
+	sync.RWMutex
+	values map[uint64FromFileKey]uint64
+}{
+	values: map[uint64FromFileKey]uint64{},
+}
+
+// ReadUint64FromFileOnce is like ReadUint64FromFile but memoizes the result.
+func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) {
+	filename := filepath.Join(path...)
+	key := uint64FromFileKey{format, filename}
+
+	uint64FromFileCache.RLock()
+	if value, ok := uint64FromFileCache.values[key]; ok {
+		uint64FromFileCache.RUnlock()
+		return value, nil
+	}
+	uint64FromFileCache.RUnlock()
+
+	value, err := ReadUint64FromFile(format, filename)
+	if err != nil {
+		return 0, err
+	}
+
+	uint64FromFileCache.Lock()
+	defer uint64FromFileCache.Unlock()
+
+	if value, ok := uint64FromFileCache.values[key]; ok {
+		// Someone else got here before us, use what is cached.
+		return value, nil
+	}
+
+	uint64FromFileCache.values[key] = value
+	return value, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
new file mode 100644
index 00000000..d95e7eb0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
@@ -0,0 +1,267 @@
+package kconfig
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/cilium/ebpf/btf"
+	"github.com/cilium/ebpf/internal"
+)
+
+// Find finds a kconfig file on the host.
+// It first reads from /boot/config- of the current running kernel and tries
+// /proc/config.gz if nothing was found in /boot.
+// If none of the file provide a kconfig, it returns an error.
+func Find() (*os.File, error) {
+	kernelRelease, err := internal.KernelRelease()
+	if err != nil {
+		return nil, fmt.Errorf("cannot get kernel release: %w", err)
+	}
+
+	path := "/boot/config-" + kernelRelease
+	f, err := os.Open(path)
+	if err == nil {
+		return f, nil
+	}
+
+	f, err = os.Open("/proc/config.gz")
+	if err == nil {
+		return f, nil
+	}
+
+	return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path)
+}
+
+// Parse parses the kconfig file for which a reader is given.
+// All the CONFIG_* which are in filter and which are set will be
+// put in the returned map as key with their corresponding value as map value.
+// If filter is nil, no filtering will occur.
+// If the kconfig file is not valid, error will be returned.
+func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) {
+	var r io.Reader
+	zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64))
+	if err != nil {
+		r = io.NewSectionReader(source, 0, math.MaxInt64)
+	} else {
+		// Source is gzip compressed, transparently decompress.
+		r = zr
+	}
+
+	ret := make(map[string]string, len(filter))
+
+	s := bufio.NewScanner(r)
+
+	for s.Scan() {
+		line := s.Bytes()
+		err = processKconfigLine(line, ret, filter)
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse line: %w", err)
+		}
+
+		if filter != nil && len(ret) == len(filter) {
+			break
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("cannot parse: %w", err)
+	}
+
+	if zr != nil {
+		return ret, zr.Close()
+	}
+
+	return ret, nil
+}
+
+// Golang translation of libbpf bpf_object__process_kconfig_line():
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874
+// It does the same checks but does not put the data inside the BPF map.
+func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error {
+	// Ignore empty lines and "# CONFIG_* is not set".
+	if !bytes.HasPrefix(line, []byte("CONFIG_")) {
+		return nil
+	}
+
+	key, value, found := bytes.Cut(line, []byte{'='})
+	if !found {
+		return fmt.Errorf("line %q does not contain separator '='", line)
+	}
+
+	if len(value) == 0 {
+		return fmt.Errorf("line %q has no value", line)
+	}
+
+	if filter != nil {
+		// NB: map[string(key)] gets special optimisation help from the compiler
+		// and doesn't allocate. Don't turn this into a variable.
+		_, ok := filter[string(key)]
+		if !ok {
+			return nil
+		}
+	}
+
+	// This can seem odd, but libbpf only sets the value the first time the key is
+	// met:
+	// https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908
+	_, ok := m[string(key)]
+	if !ok {
+		m[string(key)] = string(value)
+	}
+
+	return nil
+}
+
+// PutValue translates the value given as parameter depending on the BTF
+// type, the translated value is then written to the byte array.
+func PutValue(data []byte, typ btf.Type, value string) error {
+	typ = btf.UnderlyingType(typ)
+
+	switch value {
+	case "y", "n", "m":
+		return putValueTri(data, typ, value)
+	default:
+		if strings.HasPrefix(value, `"`) {
+			return putValueString(data, typ, value)
+		}
+		return putValueNumber(data, typ, value)
+	}
+}
+
+// Golang translation of libbpf_tristate enum:
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169
+type triState int
+
+const (
+	TriNo     triState = 0
+	TriYes    triState = 1
+	TriModule triState = 2
+)
+
+func putValueTri(data []byte, typ btf.Type, value string) error {
+	switch v := typ.(type) {
+	case *btf.Int:
+		if v.Encoding != btf.Bool {
+			return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding)
+		}
+
+		if v.Size != 1 {
+			return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size)
+		}
+
+		switch value {
+		case "y":
+			data[0] = 1
+		case "n":
+			data[0] = 0
+		default:
+			return fmt.Errorf("cannot use %q for btf.Bool", value)
+		}
+	case *btf.Enum:
+		if v.Name != "libbpf_tristate" {
+			return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name)
+		}
+
+		var tri triState
+		switch value {
+		case "y":
+			tri = TriYes
+		case "m":
+			tri = TriModule
+		case "n":
+			tri = TriNo
+		default:
+			return fmt.Errorf("value %q is not support for libbpf_tristate", value)
+		}
+
+		internal.NativeEndian.PutUint64(data, uint64(tri))
+	default:
+		return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v)
+	}
+
+	return nil
+}
+
+func putValueString(data []byte, typ btf.Type, value string) error {
+	array, ok := typ.(*btf.Array)
+	if !ok {
+		return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array)
+	}
+
+	contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int)
+	if !ok {
+		return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType)
+	}
+
+	// Any Int, which is not bool, of one byte could be used to store char:
+	// https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638
+	if contentType.Size != 1 && contentType.Encoding != btf.Bool {
+		return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size)
+	}
+
+	if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) {
+		return fmt.Errorf(`value %q must start and finish with '"'`, value)
+	}
+
+	str := strings.Trim(value, `"`)
+
+	// We need to trim string if the bpf array is smaller.
+	if uint32(len(str)) >= array.Nelems {
+		str = str[:array.Nelems]
+	}
+
+	// Write the string content to .kconfig.
+	copy(data, str)
+
+	return nil
+}
+
+func putValueNumber(data []byte, typ btf.Type, value string) error {
+	integer, ok := typ.(*btf.Int)
+	if !ok {
+		return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer)
+	}
+
+	size := integer.Size
+	sizeInBits := size * 8
+
+	var n uint64
+	var err error
+	if integer.Encoding == btf.Signed {
+		parsed, e := strconv.ParseInt(value, 0, int(sizeInBits))
+
+		n = uint64(parsed)
+		err = e
+	} else {
+		parsed, e := strconv.ParseUint(value, 0, int(sizeInBits))
+
+		n = uint64(parsed)
+		err = e
+	}
+
+	if err != nil {
+		return fmt.Errorf("cannot parse value: %w", err)
+	}
+
+	switch size {
+	case 1:
+		data[0] = byte(n)
+	case 2:
+		internal.NativeEndian.PutUint16(data, uint16(n))
+	case 4:
+		internal.NativeEndian.PutUint32(data, uint32(n))
+	case 8:
+		internal.NativeEndian.PutUint64(data, uint64(n))
+	default:
+		return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", size)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/memoize.go b/vendor/github.com/cilium/ebpf/internal/memoize.go
new file mode 100644
index 00000000..3de0a3fb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/memoize.go
@@ -0,0 +1,26 @@
+package internal
+
+import (
+	"sync"
+)
+
+type memoizedFunc[T any] struct {
+	once   sync.Once
+	fn     func() (T, error)
+	result T
+	err    error
+}
+
+func (mf *memoizedFunc[T]) do() (T, error) {
+	mf.once.Do(func() {
+		mf.result, mf.err = mf.fn()
+	})
+	return mf.result, mf.err
+}
+
+// Memoize the result of a function call.
+//
+// fn is only ever called once, even if it returns an error.
+func Memoize[T any](fn func() (T, error)) func() (T, error) {
+	return (&memoizedFunc[T]{fn: fn}).do
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go
index aeab37fc..dd6e6cba 100644
--- a/vendor/github.com/cilium/ebpf/internal/output.go
+++ b/vendor/github.com/cilium/ebpf/internal/output.go
@@ -6,6 +6,7 @@ import (
 	"go/format"
 	"go/scanner"
 	"io"
+	"reflect"
 	"strings"
 	"unicode"
 )
@@ -82,3 +83,15 @@ func WriteFormatted(src []byte, out io.Writer) error {
 
 	return nel
 }
+
+// GoTypeName is like %T, but elides the package name.
+//
+// Pointers to a type are peeled off.
+func GoTypeName(t any) string {
+	rT := reflect.TypeOf(t)
+	for rT.Kind() == reflect.Pointer {
+		rT = rT.Elem()
+	}
+	// Doesn't return the correct Name for generic types due to https://github.com/golang/go/issues/55924
+	return rT.Name()
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go
index c711353c..01d892f9 100644
--- a/vendor/github.com/cilium/ebpf/internal/pinning.go
+++ b/vendor/github.com/cilium/ebpf/internal/pinning.go
@@ -6,15 +6,12 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
-	"unsafe"
 
 	"github.com/cilium/ebpf/internal/sys"
 	"github.com/cilium/ebpf/internal/unix"
 )
 
 func Pin(currentPath, newPath string, fd *sys.FD) error {
-	const bpfFSType = 0xcafe4a11
-
 	if newPath == "" {
 		return errors.New("given pinning path cannot be empty")
 	}
@@ -22,20 +19,11 @@ func Pin(currentPath, newPath string, fd *sys.FD) error {
 		return nil
 	}
 
-	var statfs unix.Statfs_t
-	if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil {
+	fsType, err := FSType(filepath.Dir(newPath))
+	if err != nil {
 		return err
 	}
-
-	fsType := int64(statfs.Type)
-	if unsafe.Sizeof(statfs.Type) == 4 {
-		// We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a
-		// negative number when interpreted as int32 so we need to cast via
-		// uint32 to avoid sign extension.
-		fsType = int64(uint32(statfs.Type))
-	}
-
-	if fsType != bpfFSType {
+	if fsType != unix.BPF_FS_MAGIC {
 		return fmt.Errorf("%s is not on a bpf filesystem", newPath)
 	}
 
@@ -50,7 +38,7 @@ func Pin(currentPath, newPath string, fd *sys.FD) error {
 
 	// Renameat2 is used instead of os.Rename to disallow the new path replacing
 	// an existing path.
-	err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE)
+	err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE)
 	if err == nil {
 		// Object is now moved to the new pinning path.
 		return nil
diff --git a/vendor/github.com/cilium/ebpf/internal/platform.go b/vendor/github.com/cilium/ebpf/internal/platform.go
new file mode 100644
index 00000000..6e90f2ef
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/platform.go
@@ -0,0 +1,43 @@
+package internal
+
+import (
+	"runtime"
+)
+
+// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by
+// the linux kernel.
+//
+// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go
+// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047
+func PlatformPrefix() string {
+	switch runtime.GOARCH {
+	case "386":
+		return "__ia32_"
+	case "amd64", "amd64p32":
+		return "__x64_"
+
+	case "arm", "armbe":
+		return "__arm_"
+	case "arm64", "arm64be":
+		return "__arm64_"
+
+	case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le":
+		return "__mips_"
+
+	case "s390":
+		return "__s390_"
+	case "s390x":
+		return "__s390x_"
+
+	case "riscv", "riscv64":
+		return "__riscv_"
+
+	case "ppc":
+		return "__powerpc_"
+	case "ppc64", "ppc64le":
+		return "__powerpc64_"
+
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/statfs.go b/vendor/github.com/cilium/ebpf/internal/statfs.go
new file mode 100644
index 00000000..44c02d67
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/statfs.go
@@ -0,0 +1,23 @@
+package internal
+
+import (
+	"unsafe"
+
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+func FSType(path string) (int64, error) {
+	var statfs unix.Statfs_t
+	if err := unix.Statfs(path, &statfs); err != nil {
+		return 0, err
+	}
+
+	fsType := int64(statfs.Type)
+	if unsafe.Sizeof(statfs.Type) == 4 {
+		// We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a
+		// negative number when interpreted as int32 so we need to cast via
+		// uint32 to avoid sign extension.
+		fsType = int64(uint32(statfs.Type))
+	}
+	return fsType, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
index 65517d45..941a56fb 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/fd.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
@@ -17,11 +17,39 @@ type FD struct {
 }
 
 func newFD(value int) *FD {
+	if onLeakFD != nil {
+		// Attempt to store the caller's stack for the given fd value.
+		// Panic if fds contains an existing stack for the fd.
+		old, exist := fds.LoadOrStore(value, callersFrames())
+		if exist {
+			f := old.(*runtime.Frames)
+			panic(fmt.Sprintf("found existing stack for fd %d:\n%s", value, FormatFrames(f)))
+		}
+	}
+
 	fd := &FD{value}
-	runtime.SetFinalizer(fd, (*FD).Close)
+	runtime.SetFinalizer(fd, (*FD).finalize)
 	return fd
 }
 
+// finalize is set as the FD's runtime finalizer and
+// sends a leak trace before calling FD.Close().
+func (fd *FD) finalize() {
+	if fd.raw < 0 {
+		return
+	}
+
+	// Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback
+	// is invoked at most once for one sys.FD allocation, runtime.Frames can only
+	// be unwound once.
+	f, ok := fds.LoadAndDelete(fd.Int())
+	if ok && onLeakFD != nil {
+		onLeakFD(f.(*runtime.Frames))
+	}
+
+	_ = fd.Close()
+}
+
 // NewFD wraps a raw fd with a finalizer.
 //
 // You must not use the raw fd after calling this function, since the underlying
@@ -64,15 +92,16 @@ func (fd *FD) Close() error {
 		return nil
 	}
 
+	return unix.Close(fd.disown())
+}
+
+func (fd *FD) disown() int {
 	value := int(fd.raw)
+	fds.Delete(int(value))
 	fd.raw = -1
 
-	fd.Forget()
-	return unix.Close(value)
-}
-
-func (fd *FD) Forget() {
 	runtime.SetFinalizer(fd, nil)
+	return value
 }
 
 func (fd *FD) Dup() (*FD, error) {
@@ -90,7 +119,15 @@ func (fd *FD) Dup() (*FD, error) {
 	return newFD(dup), nil
 }
 
+// File takes ownership of FD and turns it into an [*os.File].
+//
+// You must not use the FD after the call returns.
+//
+// Returns nil if the FD is not valid.
 func (fd *FD) File(name string) *os.File {
-	fd.Forget()
-	return os.NewFile(uintptr(fd.raw), name)
+	if fd.raw < 0 {
+		return nil
+	}
+
+	return os.NewFile(uintptr(fd.disown()), name)
 }
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
new file mode 100644
index 00000000..cd50dd1f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
@@ -0,0 +1,93 @@
+package sys
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+// OnLeakFD controls tracing [FD] lifetime to detect resources that are not
+// closed by Close().
+//
+// If fn is not nil, tracing is enabled for all FDs created going forward. fn is
+// invoked for all FDs that are closed by the garbage collector instead of an
+// explicit Close() by a caller. Calling OnLeakFD twice with a non-nil fn
+// (without disabling tracing in the meantime) will cause a panic.
+//
+// If fn is nil, tracing will be disabled. Any FDs that have not been closed are
+// considered to be leaked, fn will be invoked for them, and the process will be
+// terminated.
+//
+// fn will be invoked at most once for every unique sys.FD allocation since a
+// runtime.Frames can only be unwound once.
+func OnLeakFD(fn func(*runtime.Frames)) {
+	// Enable leak tracing if new fn is provided.
+	if fn != nil {
+		if onLeakFD != nil {
+			panic("OnLeakFD called twice with non-nil fn")
+		}
+
+		onLeakFD = fn
+		return
+	}
+
+	// fn is nil past this point.
+
+	if onLeakFD == nil {
+		return
+	}
+
+	// Call onLeakFD for all open fds.
+	if fs := flushFrames(); len(fs) != 0 {
+		for _, f := range fs {
+			onLeakFD(f)
+		}
+	}
+
+	onLeakFD = nil
+}
+
+var onLeakFD func(*runtime.Frames)
+
+// fds is a registry of all file descriptors wrapped into sys.fds that were
+// created while an fd tracer was active.
+var fds sync.Map // map[int]*runtime.Frames
+
+// flushFrames removes all elements from fds and returns them as a slice. This
+// deals with the fact that a runtime.Frames can only be unwound once using
+// Next().
+func flushFrames() []*runtime.Frames {
+	var frames []*runtime.Frames
+	fds.Range(func(key, value any) bool {
+		frames = append(frames, value.(*runtime.Frames))
+		fds.Delete(key)
+		return true
+	})
+	return frames
+}
+
+func callersFrames() *runtime.Frames {
+	c := make([]uintptr, 32)
+
+	// Skip runtime.Callers and this function.
+	i := runtime.Callers(2, c)
+	if i == 0 {
+		return nil
+	}
+
+	return runtime.CallersFrames(c)
+}
+
+// FormatFrames formats a runtime.Frames as a human-readable string.
+func FormatFrames(fs *runtime.Frames) string {
+	var b bytes.Buffer
+	for {
+		f, more := fs.Next()
+		b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line))
+		if !more {
+			break
+		}
+	}
+	return b.String()
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
index bc7ebb44..e9bb5905 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
@@ -20,7 +20,7 @@ func NewSlicePointer(buf []byte) Pointer {
 	return Pointer{ptr: unsafe.Pointer(&buf[0])}
 }
 
-// NewSlicePointer creates a 64-bit pointer from a byte slice.
+// NewSlicePointerLen creates a 64-bit pointer from a byte slice.
 //
 // Useful to assign both the pointer and the length in one go.
 func NewSlicePointerLen(buf []byte) (Pointer, uint32) {
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
index df903d78..6278c79c 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
@@ -1,5 +1,4 @@
 //go:build armbe || mips || mips64p32
-// +build armbe mips mips64p32
 
 package sys
 
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
index a6a51edb..c27b537e 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
@@ -1,5 +1,4 @@
 //go:build 386 || amd64p32 || arm || mipsle || mips64p32le
-// +build 386 amd64p32 arm mipsle mips64p32le
 
 package sys
 
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
index 7c0279e4..2d782823 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
@@ -1,5 +1,4 @@
 //go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32
-// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32
 
 package sys
 
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
index 84d63313..7494c030 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/signals.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
@@ -8,19 +8,20 @@ import (
 	"github.com/cilium/ebpf/internal/unix"
 )
 
+// A sigset containing only SIGPROF.
 var profSet unix.Sigset_t
 
 func init() {
-	if err := sigsetAdd(&profSet, unix.SIGPROF); err != nil {
-		panic(fmt.Errorf("creating signal set: %w", err))
-	}
+	// See sigsetAdd for details on the implementation. Open coded here so
+	// that the compiler will check the constant calculations for us.
+	profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits)
 }
 
 // maskProfilerSignal locks the calling goroutine to its underlying OS thread
 // and adds SIGPROF to the thread's signal mask. This prevents pprof from
 // interrupting expensive syscalls like e.g. BPF_PROG_LOAD.
 //
-// The caller must defer sys.UnmaskProfilerSignal() to reverse the operation.
+// The caller must defer unmaskProfilerSignal() to reverse the operation.
 func maskProfilerSignal() {
 	runtime.LockOSThread()
 
@@ -43,11 +44,10 @@ func unmaskProfilerSignal() {
 }
 
 const (
-	wordBytes = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0]))
-	wordBits  = wordBytes * 8
-
-	setBytes = int(unsafe.Sizeof(unix.Sigset_t{}))
-	setBits  = setBytes * 8
+	// Signal is the nth bit in the bitfield.
+	sigprofBit = int(unix.SIGPROF - 1)
+	// The number of bits in one Sigset_t word.
+	wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8
 )
 
 // sigsetAdd adds signal to set.
@@ -59,9 +59,6 @@ func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error {
 	if signal < 1 {
 		return fmt.Errorf("signal %d must be larger than 0", signal)
 	}
-	if int(signal) > setBits {
-		return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal)
-	}
 
 	// For amd64, runtime.sigaddset() performs the following operation:
 	// set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31)
@@ -75,6 +72,10 @@ func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error {
 	// Word within the sigset the bit needs to be written to.
 	word := bit / wordBits
 
+	if word >= len(set.Val) {
+		return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal)
+	}
+
 	// Write the signal bit into its corresponding word at the corrected offset.
 	set.Val[word] |= 1 << (bit % wordBits)
 
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
index 3c7ce5dd..4fae04db 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -117,6 +117,9 @@ type LinkID uint32
 // BTFID uniquely identifies a BTF blob loaded into the kernel.
 type BTFID uint32
 
+// TypeID identifies a type in a BTF blob.
+type TypeID uint32
+
 // MapFlags control map behaviour.
 type MapFlags uint32
 
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go
index bb778790..2af7759e 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/types.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go
@@ -301,7 +301,17 @@ const (
 	BPF_FUNC_copy_from_user_task            FunctionId = 191
 	BPF_FUNC_skb_set_tstamp                 FunctionId = 192
 	BPF_FUNC_ima_file_hash                  FunctionId = 193
-	__BPF_FUNC_MAX_ID                       FunctionId = 194
+	BPF_FUNC_kptr_xchg                      FunctionId = 194
+	BPF_FUNC_map_lookup_percpu_elem         FunctionId = 195
+	BPF_FUNC_skc_to_mptcp_sock              FunctionId = 196
+	BPF_FUNC_dynptr_from_mem                FunctionId = 197
+	BPF_FUNC_ringbuf_reserve_dynptr         FunctionId = 198
+	BPF_FUNC_ringbuf_submit_dynptr          FunctionId = 199
+	BPF_FUNC_ringbuf_discard_dynptr         FunctionId = 200
+	BPF_FUNC_dynptr_read                    FunctionId = 201
+	BPF_FUNC_dynptr_write                   FunctionId = 202
+	BPF_FUNC_dynptr_data                    FunctionId = 203
+	__BPF_FUNC_MAX_ID                       FunctionId = 204
 )
 
 type HdrStartOff uint32
@@ -323,7 +333,8 @@ const (
 	BPF_LINK_TYPE_XDP            LinkType = 6
 	BPF_LINK_TYPE_PERF_EVENT     LinkType = 7
 	BPF_LINK_TYPE_KPROBE_MULTI   LinkType = 8
-	MAX_BPF_LINK_TYPE            LinkType = 9
+	BPF_LINK_TYPE_STRUCT_OPS     LinkType = 9
+	MAX_BPF_LINK_TYPE            LinkType = 10
 )
 
 type MapType uint32
@@ -477,12 +488,12 @@ type MapInfo struct {
 	MapFlags              MapFlags
 	Name                  ObjName
 	Ifindex               uint32
-	BtfVmlinuxValueTypeId uint32
+	BtfVmlinuxValueTypeId TypeID
 	NetnsDev              uint64
 	NetnsIno              uint64
 	BtfId                 uint32
-	BtfKeyTypeId          uint32
-	BtfValueTypeId        uint32
+	BtfKeyTypeId          TypeID
+	BtfValueTypeId        TypeID
 	_                     [4]byte
 	MapExtra              uint64
 }
@@ -508,7 +519,7 @@ type ProgInfo struct {
 	NrJitedFuncLens      uint32
 	JitedKsyms           uint64
 	JitedFuncLens        uint64
-	BtfId                uint32
+	BtfId                BTFID
 	FuncInfoRecSize      uint32
 	FuncInfo             uint64
 	NrFuncInfo           uint32
@@ -616,7 +627,7 @@ type LinkCreateAttr struct {
 	TargetFd    uint32
 	AttachType  AttachType
 	Flags       uint32
-	TargetBtfId uint32
+	TargetBtfId TypeID
 	_           [28]byte
 }
 
@@ -683,6 +694,25 @@ func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
 	return NewFD(int(fd))
 }
 
+type LinkCreateTracingAttr struct {
+	ProgFd      uint32
+	TargetFd    uint32
+	AttachType  AttachType
+	Flags       uint32
+	TargetBtfId BTFID
+	_           [4]byte
+	Cookie      uint64
+	_           [16]byte
+}
+
+func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) {
+	fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	if err != nil {
+		return nil, err
+	}
+	return NewFD(int(fd))
+}
+
 type LinkUpdateAttr struct {
 	LinkFd    uint32
 	NewProgFd uint32
@@ -706,9 +736,9 @@ type MapCreateAttr struct {
 	MapName               ObjName
 	MapIfindex            uint32
 	BtfFd                 uint32
-	BtfKeyTypeId          uint32
-	BtfValueTypeId        uint32
-	BtfVmlinuxValueTypeId uint32
+	BtfKeyTypeId          TypeID
+	BtfValueTypeId        TypeID
+	BtfVmlinuxValueTypeId TypeID
 	MapExtra              uint64
 }
 
@@ -986,7 +1016,7 @@ type ProgLoadAttr struct {
 	LineInfoRecSize    uint32
 	LineInfo           Pointer
 	LineInfoCnt        uint32
-	AttachBtfId        uint32
+	AttachBtfId        TypeID
 	AttachBtfObjFd     uint32
 	CoreReloCnt        uint32
 	FdArray            Pointer
@@ -1081,7 +1111,7 @@ type RawTracepointLinkInfo struct {
 type TracingLinkInfo struct {
 	AttachType  AttachType
 	TargetObjId uint32
-	TargetBtfId uint32
+	TargetBtfId TypeID
 }
 
 type XDPLinkInfo struct{ Ifindex uint32 }
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
new file mode 100644
index 00000000..4059a099
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
@@ -0,0 +1,359 @@
+package tracefs
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
+
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+	ErrInvalidInput = errors.New("invalid input")
+
+	ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes")
+)
+
+//go:generate stringer -type=ProbeType -linecomment
+
+type ProbeType uint8
+
+const (
+	Kprobe ProbeType = iota // kprobe
+	Uprobe                  // uprobe
+)
+
+func (pt ProbeType) eventsFile() (*os.File, error) {
+	path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String()))
+	if err != nil {
+		return nil, err
+	}
+
+	return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666)
+}
+
+type ProbeArgs struct {
+	Type                         ProbeType
+	Symbol, Group, Path          string
+	Offset, RefCtrOffset, Cookie uint64
+	Pid, RetprobeMaxActive       int
+	Ret                          bool
+}
+
+// RandomGroup generates a pseudorandom string for use as a tracefs group name.
+// Returns an error when the output string would exceed 63 characters (kernel
+// limitation), when rand.Read() fails or when prefix contains characters not
+// allowed by validIdentifier (alphanumeric or underscore).
+func RandomGroup(prefix string) (string, error) {
+	if !validIdentifier(prefix) {
+		return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput)
+	}
+
+	b := make([]byte, 8)
+	if _, err := rand.Read(b); err != nil {
+		return "", fmt.Errorf("reading random bytes: %w", err)
+	}
+
+	group := fmt.Sprintf("%s_%x", prefix, b)
+	if len(group) > 63 {
+		return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput)
+	}
+
+	return group, nil
+}
+
+// validIdentifier implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
+//
+// Trace event groups, names and kernel symbols must adhere to this set
+// of characters. Non-empty, first character must not be a number, all
+// characters must be alphanumeric or underscore.
+func validIdentifier(s string) bool {
+	if len(s) < 1 {
+		return false
+	}
+	for i, c := range []byte(s) {
+		switch {
+		case c >= 'a' && c <= 'z':
+		case c >= 'A' && c <= 'Z':
+		case c == '_':
+		case i > 0 && c >= '0' && c <= '9':
+
+		default:
+			return false
+		}
+	}
+
+	return true
+}
+
+func sanitizeTracefsPath(path ...string) (string, error) {
+	base, err := getTracefsPath()
+	if err != nil {
+		return "", err
+	}
+	l := filepath.Join(path...)
+	p := filepath.Join(base, l)
+	if !strings.HasPrefix(p, base) {
+		return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput)
+	}
+	return p, nil
+}
+
+// getTracefsPath will return a correct path to the tracefs mount point.
+// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing,
+// but may also be available at /sys/kernel/debug/tracing if debugfs is mounted.
+// The available tracefs paths will depend on distribution choices.
+var getTracefsPath = internal.Memoize(func() (string, error) {
+	for _, p := range []struct {
+		path   string
+		fsType int64
+	}{
+		{"/sys/kernel/tracing", unix.TRACEFS_MAGIC},
+		{"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC},
+		// RHEL/CentOS
+		{"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC},
+	} {
+		if fsType, err := internal.FSType(p.path); err == nil && fsType == p.fsType {
+			return p.path, nil
+		}
+	}
+
+	return "", errors.New("neither debugfs nor tracefs are mounted")
+})
+
+// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore.
+//
+// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(s, "_").
+func sanitizeIdentifier(s string) string {
+	var skip bool
+	return strings.Map(func(c rune) rune {
+		switch {
+		case c >= 'a' && c <= 'z',
+			c >= 'A' && c <= 'Z',
+			c >= '0' && c <= '9':
+			skip = false
+			return c
+
+		case skip:
+			return -1
+
+		default:
+			skip = true
+			return '_'
+		}
+	}, s)
+}
+
+// EventID reads a trace event's ID from tracefs given its group and name.
+// The kernel requires group and name to be alphanumeric or underscore.
+func EventID(group, name string) (uint64, error) {
+	if !validIdentifier(group) {
+		return 0, fmt.Errorf("invalid tracefs group: %q", group)
+	}
+
+	if !validIdentifier(name) {
+		return 0, fmt.Errorf("invalid tracefs name: %q", name)
+	}
+
+	path, err := sanitizeTracefsPath("events", group, name, "id")
+	if err != nil {
+		return 0, err
+	}
+	tid, err := internal.ReadUint64FromFile("%d\n", path)
+	if errors.Is(err, os.ErrNotExist) {
+		return 0, err
+	}
+	if err != nil {
+		return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
+	}
+
+	return tid, nil
+}
+
+func probePrefix(ret bool, maxActive int) string {
+	if ret {
+		if maxActive > 0 {
+			return fmt.Sprintf("r%d", maxActive)
+		}
+		return "r"
+	}
+	return "p"
+}
+
+// Event represents an entry in a tracefs probe events file.
+type Event struct {
+	typ         ProbeType
+	group, name string
+	// event id allocated by the kernel. 0 if the event has already been removed.
+	id uint64
+}
+
+// NewEvent creates a new ephemeral trace event.
+//
+// Returns os.ErrNotExist if symbol is not a valid
+// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
+// if a probe with the same group and symbol already exists. Returns an error if
+// args.RetprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if
+// the kernel is too old to support kretprobe maxactive.
+func NewEvent(args ProbeArgs) (*Event, error) {
+	// Before attempting to create a trace event through tracefs,
+	// check if an event with the same group and name already exists.
+	// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
+	// entry, so we need to rely on reads for detecting uniqueness.
+	eventName := sanitizeIdentifier(args.Symbol)
+	_, err := EventID(args.Group, eventName)
+	if err == nil {
+		return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist)
+	}
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err)
+	}
+
+	// Open the kprobe_events file in tracefs.
+	f, err := args.Type.eventsFile()
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var pe, token string
+	switch args.Type {
+	case Kprobe:
+		// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
+		// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+		// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+		// -:[GRP/]EVENT                                        : Clear a probe
+		//
+		// Some examples:
+		// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
+		// p:ebpf_5678/p_my_kprobe __x64_sys_execve
+		//
+		// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
+		// kernel default to NR_CPUS. This is desired in most eBPF cases since
+		// subsampling or rate limiting logic can be more accurately implemented in
+		// the eBPF program itself.
+		// See Documentation/kprobes.txt for more details.
+		if args.RetprobeMaxActive != 0 && !args.Ret {
+			return nil, ErrInvalidMaxActive
+		}
+		token = KprobeToken(args)
+		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token)
+	case Uprobe:
+		// The uprobe_events syntax is as follows:
+		// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
+		// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
+		// -:[GRP/]EVENT                           : Clear a probe
+		//
+		// Some examples:
+		// r:ebpf_1234/readline /bin/bash:0x12345
+		// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
+		//
+		// See Documentation/trace/uprobetracer.txt for more details.
+		if args.RetprobeMaxActive != 0 {
+			return nil, ErrInvalidMaxActive
+		}
+		token = UprobeToken(args)
+		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token)
+	}
+	_, err = f.WriteString(pe)
+
+	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+	// when trying to create a retprobe for a missing symbol.
+	if errors.Is(err, os.ErrNotExist) {
+		return nil, fmt.Errorf("token %s: not found: %w", token, err)
+	}
+	// Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+	// to an invalid insn boundary. The exact conditions that trigger this error are
+	// arch specific however.
+	if errors.Is(err, syscall.EILSEQ) {
+		return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+	}
+	// ERANGE is returned when the `SYM[+offs]` token is too big and cannot
+	// be resolved.
+	if errors.Is(err, syscall.ERANGE) {
+		return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
+	}
+
+	// Get the newly-created trace event's id.
+	tid, err := EventID(args.Group, eventName)
+	if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) {
+		// Kernels < 4.12 don't support maxactive and therefore auto generate
+		// group and event names from the symbol and offset. The symbol is used
+		// without any sanitization.
+		// See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712
+		event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset)
+		if err := removeEvent(args.Type, event); err != nil {
+			return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err)
+		}
+		return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("get trace event id: %w", err)
+	}
+
+	evt := &Event{args.Type, args.Group, eventName, tid}
+	runtime.SetFinalizer(evt, (*Event).Close)
+	return evt, nil
+}
+
+// Close removes the event from tracefs.
+//
+// Returns os.ErrClosed if the event has already been closed before.
+func (evt *Event) Close() error {
+	if evt.id == 0 {
+		return os.ErrClosed
+	}
+
+	evt.id = 0
+	runtime.SetFinalizer(evt, nil)
+	pe := fmt.Sprintf("%s/%s", evt.group, evt.name)
+	return removeEvent(evt.typ, pe)
+}
+
+func removeEvent(typ ProbeType, pe string) error {
+	f, err := typ.eventsFile()
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// See [k,u]probe_events syntax above. The probe type does not need to be specified
+	// for removals.
+	if _, err = f.WriteString("-:" + pe); err != nil {
+		return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err)
+	}
+
+	return nil
+}
+
+// ID returns the tracefs ID associated with the event.
+func (evt *Event) ID() uint64 {
+	return evt.id
+}
+
+// Group returns the tracefs group used by the event.
+func (evt *Event) Group() string {
+	return evt.group
+}
+
+// KprobeToken creates the SYM[+offs] token for the tracefs api.
+func KprobeToken(args ProbeArgs) string {
+	po := args.Symbol
+
+	if args.Offset != 0 {
+		po += fmt.Sprintf("+%#x", args.Offset)
+	}
+
+	return po
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
new file mode 100644
index 00000000..87cb0a05
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT.
+
+package tracefs
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Kprobe-0]
+	_ = x[Uprobe-1]
+}
+
+const _ProbeType_name = "kprobeuprobe"
+
+var _ProbeType_index = [...]uint8{0, 6, 12}
+
+func (i ProbeType) String() string {
+	if i >= ProbeType(len(_ProbeType_index)-1) {
+		return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _ProbeType_name[_ProbeType_index[i]:_ProbeType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
new file mode 100644
index 00000000..994f3126
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
@@ -0,0 +1,16 @@
+package tracefs
+
+import "fmt"
+
+// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
+func UprobeToken(args ProbeArgs) string {
+	po := fmt.Sprintf("%s:%#x", args.Path, args.Offset)
+
+	if args.RefCtrOffset != 0 {
+		// This is not documented in Documentation/trace/uprobetracer.txt.
+		// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
+		po += fmt.Sprintf("(%#x)", args.RefCtrOffset)
+	}
+
+	return po
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
index 04b828b4..7c970591 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -35,6 +35,7 @@ const (
 	BPF_F_RDONLY_PROG         = linux.BPF_F_RDONLY_PROG
 	BPF_F_WRONLY_PROG         = linux.BPF_F_WRONLY_PROG
 	BPF_F_SLEEPABLE           = linux.BPF_F_SLEEPABLE
+	BPF_F_XDP_HAS_FRAGS       = linux.BPF_F_XDP_HAS_FRAGS
 	BPF_F_MMAPABLE            = linux.BPF_F_MMAPABLE
 	BPF_F_INNER_MAP           = linux.BPF_F_INNER_MAP
 	BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN
@@ -49,9 +50,12 @@ const (
 	EPOLL_CLOEXEC             = linux.EPOLL_CLOEXEC
 	O_CLOEXEC                 = linux.O_CLOEXEC
 	O_NONBLOCK                = linux.O_NONBLOCK
+	PROT_NONE                 = linux.PROT_NONE
 	PROT_READ                 = linux.PROT_READ
 	PROT_WRITE                = linux.PROT_WRITE
+	MAP_ANON                  = linux.MAP_ANON
 	MAP_SHARED                = linux.MAP_SHARED
+	MAP_PRIVATE               = linux.MAP_PRIVATE
 	PERF_ATTR_SIZE_VER1       = linux.PERF_ATTR_SIZE_VER1
 	PERF_TYPE_SOFTWARE        = linux.PERF_TYPE_SOFTWARE
 	PERF_TYPE_TRACEPOINT      = linux.PERF_TYPE_TRACEPOINT
@@ -60,6 +64,7 @@ const (
 	PERF_EVENT_IOC_ENABLE     = linux.PERF_EVENT_IOC_ENABLE
 	PERF_EVENT_IOC_SET_BPF    = linux.PERF_EVENT_IOC_SET_BPF
 	PerfBitWatermark          = linux.PerfBitWatermark
+	PerfBitWriteBackward      = linux.PerfBitWriteBackward
 	PERF_SAMPLE_RAW           = linux.PERF_SAMPLE_RAW
 	PERF_FLAG_FD_CLOEXEC      = linux.PERF_FLAG_FD_CLOEXEC
 	RLIM_INFINITY             = linux.RLIM_INFINITY
@@ -77,6 +82,9 @@ const (
 	SIG_UNBLOCK               = linux.SIG_UNBLOCK
 	EM_NONE                   = linux.EM_NONE
 	EM_BPF                    = linux.EM_BPF
+	BPF_FS_MAGIC              = linux.BPF_FS_MAGIC
+	TRACEFS_MAGIC             = linux.TRACEFS_MAGIC
+	DEBUGFS_MAGIC             = linux.DEBUGFS_MAGIC
 )
 
 type Statfs_t = linux.Statfs_t
@@ -188,3 +196,7 @@ func Open(path string, mode int, perm uint32) (int, error) {
 func Fstat(fd int, stat *Stat_t) error {
 	return linux.Fstat(fd, stat)
 }
+
+func SetsockoptInt(fd, level, opt, value int) error {
+	return linux.SetsockoptInt(fd, level, opt, value)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
index e87bf830..5e86b505 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
@@ -41,6 +41,7 @@ const (
 	BPF_F_MMAPABLE
 	BPF_F_INNER_MAP
 	BPF_F_KPROBE_MULTI_RETURN
+	BPF_F_XDP_HAS_FRAGS
 	BPF_OBJ_NAME_LEN
 	BPF_TAG_SIZE
 	BPF_RINGBUF_BUSY_BIT
@@ -53,9 +54,12 @@ const (
 	EPOLL_CLOEXEC
 	O_CLOEXEC
 	O_NONBLOCK
+	PROT_NONE
 	PROT_READ
 	PROT_WRITE
+	MAP_ANON
 	MAP_SHARED
+	MAP_PRIVATE
 	PERF_ATTR_SIZE_VER1
 	PERF_TYPE_SOFTWARE
 	PERF_TYPE_TRACEPOINT
@@ -64,6 +68,7 @@ const (
 	PERF_EVENT_IOC_ENABLE
 	PERF_EVENT_IOC_SET_BPF
 	PerfBitWatermark
+	PerfBitWriteBackward
 	PERF_SAMPLE_RAW
 	PERF_FLAG_FD_CLOEXEC
 	RLIM_INFINITY
@@ -81,6 +86,9 @@ const (
 	SIG_UNBLOCK
 	EM_NONE
 	EM_BPF
+	BPF_FS_MAGIC
+	TRACEFS_MAGIC
+	DEBUGFS_MAGIC
 )
 
 type Statfs_t struct {
@@ -98,7 +106,19 @@ type Statfs_t struct {
 	Spare   [4]int64
 }
 
-type Stat_t struct{}
+type Stat_t struct {
+	Dev     uint64
+	Ino     uint64
+	Nlink   uint64
+	Mode    uint32
+	Uid     uint32
+	Gid     uint32
+	_       int32
+	Rdev    uint64
+	Size    int64
+	Blksize int64
+	Blocks  int64
+}
 
 type Rlimit struct {
 	Cur uint64
@@ -268,3 +288,7 @@ func Open(path string, mode int, perm uint32) (int, error) {
 func Fstat(fd int, stat *Stat_t) error {
 	return errNonLinux
 }
+
+func SetsockoptInt(fd, level, opt, value int) error {
+	return errNonLinux
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go
index aaffa3cb..10e639bf 100644
--- a/vendor/github.com/cilium/ebpf/internal/vdso.go
+++ b/vendor/github.com/cilium/ebpf/internal/vdso.go
@@ -120,7 +120,7 @@ func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) {
 			var name string
 			if n.NameSize > 0 {
 				// Read the note name, aligned to 4 bytes.
-				buf := make([]byte, Align(int(n.NameSize), 4))
+				buf := make([]byte, Align(n.NameSize, 4))
 				if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil {
 					return 0, fmt.Errorf("reading note name: %w", err)
 				}
@@ -142,7 +142,7 @@ func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) {
 				}
 
 				// Discard the note descriptor if it exists but we're not interested in it.
-				if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil {
+				if _, err := io.CopyN(io.Discard, sr, int64(Align(n.DescSize, 4))); err != nil {
 					return 0, err
 				}
 			}
diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go
index 370e01e4..9b17ffb4 100644
--- a/vendor/github.com/cilium/ebpf/internal/version.go
+++ b/vendor/github.com/cilium/ebpf/internal/version.go
@@ -2,7 +2,6 @@ package internal
 
 import (
 	"fmt"
-	"sync"
 
 	"github.com/cilium/ebpf/internal/unix"
 )
@@ -15,14 +14,6 @@ const (
 	MagicKernelVersion = 0xFFFFFFFE
 )
 
-var (
-	kernelVersion = struct {
-		once    sync.Once
-		version Version
-		err     error
-	}{}
-)
-
 // A Version in the form Major.Minor.Patch.
 type Version [3]uint16
 
@@ -88,16 +79,9 @@ func (v Version) Kernel() uint32 {
 }
 
 // KernelVersion returns the version of the currently running kernel.
-func KernelVersion() (Version, error) {
-	kernelVersion.once.Do(func() {
-		kernelVersion.version, kernelVersion.err = detectKernelVersion()
-	})
-
-	if kernelVersion.err != nil {
-		return Version{}, kernelVersion.err
-	}
-	return kernelVersion.version, nil
-}
+var KernelVersion = Memoize(func() (Version, error) {
+	return detectKernelVersion()
+})
 
 // detectKernelVersion returns the version of the running kernel.
 func detectKernelVersion() (Version, error) {
diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go
index bfad1cce..58e85fe9 100644
--- a/vendor/github.com/cilium/ebpf/link/cgroup.go
+++ b/vendor/github.com/cilium/ebpf/link/cgroup.go
@@ -10,10 +10,15 @@ import (
 
 type cgroupAttachFlags uint32
 
-// cgroup attach flags
 const (
+	// Allow programs attached to sub-cgroups to override the verdict of this
+	// program.
 	flagAllowOverride cgroupAttachFlags = 1 << iota
+	// Allow attaching multiple programs to the cgroup. Only works if the cgroup
+	// has zero or more programs attached using the Multi flag. Implies override.
 	flagAllowMulti
+	// Set automatically by progAttachCgroup.Update(). Used for updating a
+	// specific given program attached in multi-mode.
 	flagReplace
 )
 
@@ -27,29 +32,39 @@ type CgroupOptions struct {
 }
 
 // AttachCgroup links a BPF program to a cgroup.
-func AttachCgroup(opts CgroupOptions) (Link, error) {
+//
+// If the running kernel doesn't support bpf_link, attempts to emulate its
+// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not
+// available, the returned [Link] will not support pinning to bpffs.
+//
+// If you need more control over attachment flags or the attachment mechanism
+// used, look at [RawAttachProgram] and [AttachRawLink] instead.
+func AttachCgroup(opts CgroupOptions) (cg Link, err error) {
 	cgroup, err := os.Open(opts.Path)
 	if err != nil {
 		return nil, fmt.Errorf("can't open cgroup: %s", err)
 	}
-
-	clone, err := opts.Program.Clone()
-	if err != nil {
+	defer func() {
+		if _, ok := cg.(*progAttachCgroup); ok {
+			// Skip closing the cgroup handle if we return a valid progAttachCgroup,
+			// where the handle is retained to implement Update().
+			return
+		}
 		cgroup.Close()
-		return nil, err
+	}()
+
+	cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program)
+	if err == nil {
+		return cg, nil
 	}
 
-	var cg Link
-	cg, err = newLinkCgroup(cgroup, opts.Attach, clone)
 	if errors.Is(err, ErrNotSupported) {
-		cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti)
+		cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti)
 	}
 	if errors.Is(err, ErrNotSupported) {
-		cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride)
+		cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride)
 	}
 	if err != nil {
-		cgroup.Close()
-		clone.Close()
 		return nil, err
 	}
 
@@ -67,6 +82,8 @@ var _ Link = (*progAttachCgroup)(nil)
 
 func (cg *progAttachCgroup) isLink() {}
 
+// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH.
+// cgroup and prog are retained by [progAttachCgroup].
 func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
 	if flags&flagAllowMulti > 0 {
 		if err := haveProgAttachReplace(); err != nil {
@@ -74,17 +91,24 @@ func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Pro
 		}
 	}
 
-	err := RawAttachProgram(RawAttachProgramOptions{
+	// Use a program handle that cannot be closed by the caller.
+	clone, err := prog.Clone()
+	if err != nil {
+		return nil, err
+	}
+
+	err = RawAttachProgram(RawAttachProgramOptions{
 		Target:  int(cgroup.Fd()),
-		Program: prog,
+		Program: clone,
 		Flags:   uint32(flags),
 		Attach:  attach,
 	})
 	if err != nil {
+		clone.Close()
 		return nil, fmt.Errorf("cgroup: %w", err)
 	}
 
-	return &progAttachCgroup{cgroup, prog, attach, flags}, nil
+	return &progAttachCgroup{cgroup, clone, attach, flags}, nil
 }
 
 func (cg *progAttachCgroup) Close() error {
@@ -151,6 +175,7 @@ type linkCgroup struct {
 
 var _ Link = (*linkCgroup)(nil)
 
+// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE.
 func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
 	link, err := AttachRawLink(RawLinkOptions{
 		Target:  int(cgroup.Fd()),
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go
index 9ce7eb4a..b54ca908 100644
--- a/vendor/github.com/cilium/ebpf/link/kprobe.go
+++ b/vendor/github.com/cilium/ebpf/link/kprobe.go
@@ -1,34 +1,20 @@
 package link
 
 import (
-	"crypto/rand"
 	"errors"
 	"fmt"
 	"os"
-	"path/filepath"
 	"runtime"
 	"strings"
-	"syscall"
 	"unsafe"
 
 	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal"
 	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/tracefs"
 	"github.com/cilium/ebpf/internal/unix"
 )
 
-var (
-	kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
-)
-
-type probeType uint8
-
-type probeArgs struct {
-	symbol, group, path          string
-	offset, refCtrOffset, cookie uint64
-	pid, retprobeMaxActive       int
-	ret                          bool
-}
-
 // KprobeOptions defines additional parameters that will be used
 // when loading Kprobes.
 type KprobeOptions struct {
@@ -47,38 +33,17 @@ type KprobeOptions struct {
 	// Deprecated: this setting forces the use of an outdated kernel API and is not portable
 	// across kernel versions.
 	RetprobeMaxActive int
+	// Prefix used for the event name if the kprobe must be attached using tracefs.
+	// The group name will be formatted as `<prefix>_<randomstr>`.
+	// The default empty string is equivalent to "ebpf" as the prefix.
+	TraceFSPrefix string
 }
 
-const (
-	kprobeType probeType = iota
-	uprobeType
-)
-
-func (pt probeType) String() string {
-	if pt == kprobeType {
-		return "kprobe"
-	}
-	return "uprobe"
-}
-
-func (pt probeType) EventsPath() string {
-	if pt == kprobeType {
-		return kprobeEventsPath
+func (ko *KprobeOptions) cookie() uint64 {
+	if ko == nil {
+		return 0
 	}
-	return uprobeEventsPath
-}
-
-func (pt probeType) PerfEventType(ret bool) perfEventType {
-	if pt == kprobeType {
-		if ret {
-			return kretprobeEvent
-		}
-		return kprobeEvent
-	}
-	if ret {
-		return uretprobeEvent
-	}
-	return uprobeEvent
+	return ko.Cookie
 }
 
 // Kprobe attaches the given eBPF program to a perf event that fires when the
@@ -90,13 +55,17 @@ func (pt probeType) PerfEventType(ret bool) perfEventType {
 // Losing the reference to the resulting Link (kp) will close the Kprobe
 // and prevent further execution of prog. The Link must be Closed during
 // program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
 func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
 	k, err := kprobe(symbol, prog, opts, false)
 	if err != nil {
 		return nil, err
 	}
 
-	lnk, err := attachPerfEvent(k, prog)
+	lnk, err := attachPerfEvent(k, prog, opts.cookie())
 	if err != nil {
 		k.Close()
 		return nil, err
@@ -115,6 +84,10 @@ func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error
 // and prevent further execution of prog. The Link must be Closed during
 // program shutdown to avoid leaking system resources.
 //
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
 // On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol
 // incorrectly returns unix.EINVAL instead of os.ErrNotExist.
 func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
@@ -123,7 +96,7 @@ func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, er
 		return nil, err
 	}
 
-	lnk, err := attachPerfEvent(k, prog)
+	lnk, err := attachPerfEvent(k, prog, opts.cookie())
 	if err != nil {
 		k.Close()
 		return nil, err
@@ -175,51 +148,51 @@ func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*
 		return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
 	}
 
-	args := probeArgs{
-		pid:    perfAllThreads,
-		symbol: symbol,
-		ret:    ret,
+	args := tracefs.ProbeArgs{
+		Type:   tracefs.Kprobe,
+		Pid:    perfAllThreads,
+		Symbol: symbol,
+		Ret:    ret,
 	}
 
 	if opts != nil {
-		args.retprobeMaxActive = opts.RetprobeMaxActive
-		args.cookie = opts.Cookie
-		args.offset = opts.Offset
+		args.RetprobeMaxActive = opts.RetprobeMaxActive
+		args.Cookie = opts.Cookie
+		args.Offset = opts.Offset
+		args.Group = opts.TraceFSPrefix
 	}
 
 	// Use kprobe PMU if the kernel has it available.
-	tp, err := pmuKprobe(args)
+	tp, err := pmuProbe(args)
 	if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
-		args.symbol = platformPrefix(symbol)
-		tp, err = pmuKprobe(args)
+		if prefix := internal.PlatformPrefix(); prefix != "" {
+			args.Symbol = prefix + symbol
+			tp, err = pmuProbe(args)
+		}
 	}
 	if err == nil {
 		return tp, nil
 	}
 	if err != nil && !errors.Is(err, ErrNotSupported) {
-		return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err)
+		return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err)
 	}
 
 	// Use tracefs if kprobe PMU is missing.
-	args.symbol = symbol
-	tp, err = tracefsKprobe(args)
+	args.Symbol = symbol
+	tp, err = tracefsProbe(args)
 	if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
-		args.symbol = platformPrefix(symbol)
-		tp, err = tracefsKprobe(args)
+		if prefix := internal.PlatformPrefix(); prefix != "" {
+			args.Symbol = prefix + symbol
+			tp, err = tracefsProbe(args)
+		}
 	}
 	if err != nil {
-		return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err)
+		return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err)
 	}
 
 	return tp, nil
 }
 
-// pmuKprobe opens a perf event based on the kprobe PMU.
-// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
-func pmuKprobe(args probeArgs) (*perfEvent, error) {
-	return pmuProbe(kprobeType, args)
-}
-
 // pmuProbe opens a perf event based on a Performance Monitoring Unit.
 //
 // Requires at least a 4.17 kernel.
@@ -227,25 +200,25 @@ func pmuKprobe(args probeArgs) (*perfEvent, error) {
 // 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
 //
 // Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
-func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
+func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
 	// Getting the PMU type will fail if the kernel doesn't support
 	// the perf_[k,u]probe PMU.
-	et, err := readUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", typ.String(), "type")
+	eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type")
 	if errors.Is(err, os.ErrNotExist) {
-		return nil, fmt.Errorf("%s: %w", typ, ErrNotSupported)
+		return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported)
 	}
 	if err != nil {
 		return nil, err
 	}
 
 	// Use tracefs if we want to set kretprobe's retprobeMaxActive.
-	if args.retprobeMaxActive != 0 {
+	if args.RetprobeMaxActive != 0 {
 		return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported)
 	}
 
 	var config uint64
-	if args.ret {
-		bit, err := readUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", typ.String(), "/format/retprobe")
+	if args.Ret {
+		bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe")
 		if err != nil {
 			return nil, err
 		}
@@ -257,36 +230,36 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
 		sp    unsafe.Pointer
 		token string
 	)
-	switch typ {
-	case kprobeType:
+	switch args.Type {
+	case tracefs.Kprobe:
 		// Create a pointer to a NUL-terminated string for the kernel.
-		sp, err = unsafeStringPtr(args.symbol)
+		sp, err = unsafeStringPtr(args.Symbol)
 		if err != nil {
 			return nil, err
 		}
 
-		token = kprobeToken(args)
+		token = tracefs.KprobeToken(args)
 
 		attr = unix.PerfEventAttr{
 			// The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1,
 			// since it added the config2 (Ext2) field. Use Ext2 as probe_offset.
 			Size:   unix.PERF_ATTR_SIZE_VER1,
-			Type:   uint32(et),          // PMU event type read from sysfs
+			Type:   uint32(eventType),   // PMU event type read from sysfs
 			Ext1:   uint64(uintptr(sp)), // Kernel symbol to trace
-			Ext2:   args.offset,         // Kernel symbol offset
+			Ext2:   args.Offset,         // Kernel symbol offset
 			Config: config,              // Retprobe flag
 		}
-	case uprobeType:
-		sp, err = unsafeStringPtr(args.path)
+	case tracefs.Uprobe:
+		sp, err = unsafeStringPtr(args.Path)
 		if err != nil {
 			return nil, err
 		}
 
-		if args.refCtrOffset != 0 {
-			config |= args.refCtrOffset << uprobeRefCtrOffsetShift
+		if args.RefCtrOffset != 0 {
+			config |= args.RefCtrOffset << uprobeRefCtrOffsetShift
 		}
 
-		token = uprobeToken(args)
+		token = tracefs.UprobeToken(args)
 
 		attr = unix.PerfEventAttr{
 			// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
@@ -294,19 +267,19 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
 			// size of the internal buffer the kernel allocates for reading the
 			// perf_event_attr argument from userspace.
 			Size:   unix.PERF_ATTR_SIZE_VER1,
-			Type:   uint32(et),          // PMU event type read from sysfs
+			Type:   uint32(eventType),   // PMU event type read from sysfs
 			Ext1:   uint64(uintptr(sp)), // Uprobe path
-			Ext2:   args.offset,         // Uprobe offset
+			Ext2:   args.Offset,         // Uprobe offset
 			Config: config,              // RefCtrOffset, Retprobe flag
 		}
 	}
 
-	rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+	rawFd, err := unix.PerfEventOpen(&attr, args.Pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
 
 	// On some old kernels, kprobe PMU doesn't allow `.` in symbol names and
 	// return -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
 	// https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343
-	if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") {
+	if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") {
 		return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported)
 	}
 	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
@@ -323,7 +296,7 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
 	// Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
 	// when attempting to set a uprobe on a trap instruction.
 	if errors.Is(err, sys.ENOTSUPP) {
-		return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.offset, err)
+		return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err)
 	}
 
 	if err != nil {
@@ -339,18 +312,7 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
 	}
 
 	// Kernel has perf_[k,u]probe PMU available, initialize perf event.
-	return &perfEvent{
-		typ:    typ.PerfEventType(args.ret),
-		name:   args.symbol,
-		pmuID:  et,
-		cookie: args.cookie,
-		fd:     fd,
-	}, nil
-}
-
-// tracefsKprobe creates a Kprobe tracefs entry.
-func tracefsKprobe(args probeArgs) (*perfEvent, error) {
-	return tracefsProbe(kprobeType, args)
+	return newPerfEvent(fd, nil), nil
 }
 
 // tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
@@ -359,216 +321,37 @@ func tracefsKprobe(args probeArgs) (*perfEvent, error) {
 // Path and offset are only set in the case of uprobe(s) and are used to set
 // the executable/library path on the filesystem and the offset where the probe is inserted.
 // A perf event is then opened on the newly-created trace event and returned to the caller.
-func tracefsProbe(typ probeType, args probeArgs) (*perfEvent, error) {
+func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
+	groupPrefix := "ebpf"
+	if args.Group != "" {
+		groupPrefix = args.Group
+	}
+
 	// Generate a random string for each trace event we attempt to create.
 	// This value is used as the 'group' token in tracefs to allow creating
 	// multiple kprobe trace events with the same name.
-	group, err := randomGroup("ebpf")
+	group, err := tracefs.RandomGroup(groupPrefix)
 	if err != nil {
 		return nil, fmt.Errorf("randomizing group name: %w", err)
 	}
-	args.group = group
+	args.Group = group
 
 	// Create the [k,u]probe trace event using tracefs.
-	tid, err := createTraceFSProbeEvent(typ, args)
+	evt, err := tracefs.NewEvent(args)
 	if err != nil {
 		return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
 	}
 
 	// Kprobes are ephemeral tracepoints and share the same perf event type.
-	fd, err := openTracepointPerfEvent(tid, args.pid)
+	fd, err := openTracepointPerfEvent(evt.ID(), args.Pid)
 	if err != nil {
 		// Make sure we clean up the created tracefs event when we return error.
 		// If a livepatch handler is already active on the symbol, the write to
 		// tracefs will succeed, a trace event will show up, but creating the
 		// perf event will fail with EBUSY.
-		_ = closeTraceFSProbeEvent(typ, args.group, args.symbol)
+		_ = evt.Close()
 		return nil, err
 	}
 
-	return &perfEvent{
-		typ:       typ.PerfEventType(args.ret),
-		group:     group,
-		name:      args.symbol,
-		tracefsID: tid,
-		cookie:    args.cookie,
-		fd:        fd,
-	}, nil
-}
-
-var errInvalidMaxActive = errors.New("can only set maxactive on kretprobes")
-
-// createTraceFSProbeEvent creates a new ephemeral trace event.
-//
-// Returns os.ErrNotExist if symbol is not a valid
-// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
-// if a probe with the same group and symbol already exists. Returns an error if
-// args.retprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if
-// the kernel is too old to support kretprobe maxactive.
-func createTraceFSProbeEvent(typ probeType, args probeArgs) (uint64, error) {
-	// Before attempting to create a trace event through tracefs,
-	// check if an event with the same group and name already exists.
-	// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
-	// entry, so we need to rely on reads for detecting uniqueness.
-	_, err := getTraceEventID(args.group, args.symbol)
-	if err == nil {
-		return 0, fmt.Errorf("trace event %s/%s: %w", args.group, args.symbol, os.ErrExist)
-	}
-	if err != nil && !errors.Is(err, os.ErrNotExist) {
-		return 0, fmt.Errorf("checking trace event %s/%s: %w", args.group, args.symbol, err)
-	}
-
-	// Open the kprobe_events file in tracefs.
-	f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
-	if err != nil {
-		return 0, fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
-	}
-	defer f.Close()
-
-	var pe, token string
-	switch typ {
-	case kprobeType:
-		// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
-		// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
-		// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
-		// -:[GRP/]EVENT                                        : Clear a probe
-		//
-		// Some examples:
-		// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
-		// p:ebpf_5678/p_my_kprobe __x64_sys_execve
-		//
-		// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
-		// kernel default to NR_CPUS. This is desired in most eBPF cases since
-		// subsampling or rate limiting logic can be more accurately implemented in
-		// the eBPF program itself.
-		// See Documentation/kprobes.txt for more details.
-		if args.retprobeMaxActive != 0 && !args.ret {
-			return 0, errInvalidMaxActive
-		}
-		token = kprobeToken(args)
-		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret, args.retprobeMaxActive), args.group, sanitizeSymbol(args.symbol), token)
-	case uprobeType:
-		// The uprobe_events syntax is as follows:
-		// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
-		// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
-		// -:[GRP/]EVENT                           : Clear a probe
-		//
-		// Some examples:
-		// r:ebpf_1234/readline /bin/bash:0x12345
-		// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
-		//
-		// See Documentation/trace/uprobetracer.txt for more details.
-		if args.retprobeMaxActive != 0 {
-			return 0, errInvalidMaxActive
-		}
-		token = uprobeToken(args)
-		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret, 0), args.group, args.symbol, token)
-	}
-	_, err = f.WriteString(pe)
-
-	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
-	// when trying to create a retprobe for a missing symbol.
-	if errors.Is(err, os.ErrNotExist) {
-		return 0, fmt.Errorf("token %s: not found: %w", token, err)
-	}
-	// Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
-	// to an invalid insn boundary. The exact conditions that trigger this error are
-	// arch specific however.
-	if errors.Is(err, syscall.EILSEQ) {
-		return 0, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
-	}
-	// ERANGE is returned when the `SYM[+offs]` token is too big and cannot
-	// be resolved.
-	if errors.Is(err, syscall.ERANGE) {
-		return 0, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
-	}
-
-	if err != nil {
-		return 0, fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
-	}
-
-	// Get the newly-created trace event's id.
-	tid, err := getTraceEventID(args.group, args.symbol)
-	if args.retprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) {
-		// Kernels < 4.12 don't support maxactive and therefore auto generate
-		// group and event names from the symbol and offset. The symbol is used
-		// without any sanitization.
-		// See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712
-		event := fmt.Sprintf("kprobes/r_%s_%d", args.symbol, args.offset)
-		if err := removeTraceFSProbeEvent(typ, event); err != nil {
-			return 0, fmt.Errorf("failed to remove spurious maxactive event: %s", err)
-		}
-		return 0, fmt.Errorf("create trace event with non-default maxactive: %w", ErrNotSupported)
-	}
-	if err != nil {
-		return 0, fmt.Errorf("get trace event id: %w", err)
-	}
-
-	return tid, nil
-}
-
-// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
-// from <tracefs>/[k,u]probe_events.
-func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
-	pe := fmt.Sprintf("%s/%s", group, sanitizeSymbol(symbol))
-	return removeTraceFSProbeEvent(typ, pe)
-}
-
-func removeTraceFSProbeEvent(typ probeType, pe string) error {
-	f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
-	if err != nil {
-		return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
-	}
-	defer f.Close()
-
-	// See [k,u]probe_events syntax above. The probe type does not need to be specified
-	// for removals.
-	if _, err = f.WriteString("-:" + pe); err != nil {
-		return fmt.Errorf("remove event %q from %s: %w", pe, typ.EventsPath(), err)
-	}
-
-	return nil
-}
-
-// randomGroup generates a pseudorandom string for use as a tracefs group name.
-// Returns an error when the output string would exceed 63 characters (kernel
-// limitation), when rand.Read() fails or when prefix contains characters not
-// allowed by isValidTraceID.
-func randomGroup(prefix string) (string, error) {
-	if !isValidTraceID(prefix) {
-		return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput)
-	}
-
-	b := make([]byte, 8)
-	if _, err := rand.Read(b); err != nil {
-		return "", fmt.Errorf("reading random bytes: %w", err)
-	}
-
-	group := fmt.Sprintf("%s_%x", prefix, b)
-	if len(group) > 63 {
-		return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput)
-	}
-
-	return group, nil
-}
-
-func probePrefix(ret bool, maxActive int) string {
-	if ret {
-		if maxActive > 0 {
-			return fmt.Sprintf("r%d", maxActive)
-		}
-		return "r"
-	}
-	return "p"
-}
-
-// kprobeToken creates the SYM[+offs] token for the tracefs api.
-func kprobeToken(args probeArgs) string {
-	po := args.symbol
-
-	if args.offset != 0 {
-		po += fmt.Sprintf("+%#x", args.offset)
-	}
-
-	return po
+	return newPerfEvent(fd, evt), nil
 }
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
index 151f47d6..697c6d73 100644
--- a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
+++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
@@ -28,7 +28,7 @@ type KprobeMultiOptions struct {
 	// limits the attach point to the function entry or return.
 	//
 	// Mutually exclusive with Symbols.
-	Addresses []uint64
+	Addresses []uintptr
 
 	// Cookies specifies arbitrary values that can be fetched from an eBPF
 	// program via `bpf_get_attach_cookie()`.
diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go
index d4eeb92d..36acd6ee 100644
--- a/vendor/github.com/cilium/ebpf/link/link.go
+++ b/vendor/github.com/cilium/ebpf/link/link.go
@@ -46,6 +46,18 @@ type Link interface {
 	isLink()
 }
 
+// NewLinkFromFD creates a link from a raw fd.
+//
+// You should not use fd after calling this function.
+func NewLinkFromFD(fd int) (Link, error) {
+	sysFD, err := sys.NewFD(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	return wrapRawLink(&RawLink{fd: sysFD})
+}
+
 // LoadPinnedLink loads a link that was persisted into a bpffs.
 func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
 	raw, err := loadPinnedRawLink(fileName, opts)
@@ -59,10 +71,15 @@ func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
 // wrap a RawLink in a more specific type if possible.
 //
 // The function takes ownership of raw and closes it on error.
-func wrapRawLink(raw *RawLink) (Link, error) {
+func wrapRawLink(raw *RawLink) (_ Link, err error) {
+	defer func() {
+		if err != nil {
+			raw.Close()
+		}
+	}()
+
 	info, err := raw.Info()
 	if err != nil {
-		raw.Close()
 		return nil, err
 	}
 
@@ -77,6 +94,10 @@ func wrapRawLink(raw *RawLink) (Link, error) {
 		return &Iter{*raw}, nil
 	case NetNsType:
 		return &NetNsLink{*raw}, nil
+	case KprobeMultiType:
+		return &kprobeMultiLink{*raw}, nil
+	case PerfEventType:
+		return nil, fmt.Errorf("recovering perf event fd: %w", ErrNotSupported)
 	default:
 		return raw, nil
 	}
@@ -172,7 +193,7 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
 		TargetFd:    uint32(opts.Target),
 		ProgFd:      uint32(progFd),
 		AttachType:  sys.AttachType(opts.Attach),
-		TargetBtfId: uint32(opts.BTF),
+		TargetBtfId: opts.BTF,
 		Flags:       opts.Flags,
 	}
 	fd, err := sys.LinkCreate(&attr)
diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go
index 61f80627..5f7a628b 100644
--- a/vendor/github.com/cilium/ebpf/link/perf_event.go
+++ b/vendor/github.com/cilium/ebpf/link/perf_event.go
@@ -1,20 +1,16 @@
 package link
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
-	"os"
-	"path/filepath"
 	"runtime"
-	"strings"
-	"sync"
 	"unsafe"
 
 	"github.com/cilium/ebpf"
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/internal"
 	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/tracefs"
 	"github.com/cilium/ebpf/internal/unix"
 )
 
@@ -42,67 +38,41 @@ import (
 //   stops any further invocations of the attached eBPF program.
 
 var (
-	tracefsPath = "/sys/kernel/debug/tracing"
-
-	errInvalidInput = errors.New("invalid input")
+	errInvalidInput = tracefs.ErrInvalidInput
 )
 
 const (
 	perfAllThreads = -1
 )
 
-type perfEventType uint8
-
-const (
-	tracepointEvent perfEventType = iota
-	kprobeEvent
-	kretprobeEvent
-	uprobeEvent
-	uretprobeEvent
-)
-
 // A perfEvent represents a perf event kernel object. Exactly one eBPF program
 // can be attached to it. It is created based on a tracefs trace event or a
 // Performance Monitoring Unit (PMU).
 type perfEvent struct {
-	// The event type determines the types of programs that can be attached.
-	typ perfEventType
-
-	// Group and name of the tracepoint/kprobe/uprobe.
-	group string
-	name  string
-
-	// PMU event ID read from sysfs. Valid IDs are non-zero.
-	pmuID uint64
-	// ID of the trace event read from tracefs. Valid IDs are non-zero.
-	tracefsID uint64
-
-	// User provided arbitrary value.
-	cookie uint64
+	// Trace event backing this perfEvent. May be nil.
+	tracefsEvent *tracefs.Event
 
 	// This is the perf event FD.
 	fd *sys.FD
 }
 
+func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent {
+	pe := &perfEvent{event, fd}
+	// Both event and fd have their own finalizer, but we want to
+	// guarantee that they are closed in a certain order.
+	runtime.SetFinalizer(pe, (*perfEvent).Close)
+	return pe
+}
+
 func (pe *perfEvent) Close() error {
+	runtime.SetFinalizer(pe, nil)
+
 	if err := pe.fd.Close(); err != nil {
 		return fmt.Errorf("closing perf event fd: %w", err)
 	}
 
-	switch pe.typ {
-	case kprobeEvent, kretprobeEvent:
-		// Clean up kprobe tracefs entry.
-		if pe.tracefsID != 0 {
-			return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name)
-		}
-	case uprobeEvent, uretprobeEvent:
-		// Clean up uprobe tracefs entry.
-		if pe.tracefsID != 0 {
-			return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name)
-		}
-	case tracepointEvent:
-		// Tracepoint trace events don't hold any extra resources.
-		return nil
+	if pe.tracefsEvent != nil {
+		return pe.tracefsEvent.Close()
 	}
 
 	return nil
@@ -136,10 +106,14 @@ func (pl *perfEventLink) Unpin() error {
 }
 
 func (pl *perfEventLink) Close() error {
+	if err := pl.fd.Close(); err != nil {
+		return fmt.Errorf("perf link close: %w", err)
+	}
+
 	if err := pl.pe.Close(); err != nil {
-		return fmt.Errorf("perf event link close: %w", err)
+		return fmt.Errorf("perf event close: %w", err)
 	}
-	return pl.fd.Close()
+	return nil
 }
 
 func (pl *perfEventLink) Update(prog *ebpf.Program) error {
@@ -183,7 +157,7 @@ func (pi *perfEventIoctl) Info() (*Info, error) {
 // attach the given eBPF prog to the perf event stored in pe.
 // pe must contain a valid perf event fd.
 // prog's type must match the program type stored in pe.
-func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) {
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) {
 	if prog == nil {
 		return nil, errors.New("cannot attach a nil program")
 	}
@@ -191,30 +165,18 @@ func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) {
 		return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
 	}
 
-	switch pe.typ {
-	case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
-		if t := prog.Type(); t != ebpf.Kprobe {
-			return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
-		}
-	case tracepointEvent:
-		if t := prog.Type(); t != ebpf.TracePoint {
-			return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
-		}
-	default:
-		return nil, fmt.Errorf("unknown perf event type: %d", pe.typ)
+	if err := haveBPFLinkPerfEvent(); err == nil {
+		return attachPerfEventLink(pe, prog, cookie)
 	}
 
-	if err := haveBPFLinkPerfEvent(); err == nil {
-		return attachPerfEventLink(pe, prog)
+	if cookie != 0 {
+		return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
 	}
+
 	return attachPerfEventIoctl(pe, prog)
 }
 
 func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) {
-	if pe.cookie != 0 {
-		return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
-	}
-
 	// Assign the eBPF program to the perf event.
 	err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
 	if err != nil {
@@ -226,32 +188,24 @@ func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, e
 		return nil, fmt.Errorf("enable perf event: %s", err)
 	}
 
-	pi := &perfEventIoctl{pe}
-
-	// Close the perf event when its reference is lost to avoid leaking system resources.
-	runtime.SetFinalizer(pi, (*perfEventIoctl).Close)
-	return pi, nil
+	return &perfEventIoctl{pe}, nil
 }
 
 // Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+).
 //
 // https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
-func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) {
+func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) {
 	fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
 		ProgFd:     uint32(prog.FD()),
 		TargetFd:   pe.fd.Uint(),
 		AttachType: sys.BPF_PERF_EVENT,
-		BpfCookie:  pe.cookie,
+		BpfCookie:  cookie,
 	})
 	if err != nil {
 		return nil, fmt.Errorf("cannot create bpf perf link: %v", err)
 	}
 
-	pl := &perfEventLink{RawLink{fd: fd}, pe}
-
-	// Close the perf event when its reference is lost to avoid leaking system resources.
-	runtime.SetFinalizer(pl, (*perfEventLink).Close)
-	return pl, nil
+	return &perfEventLink{RawLink{fd: fd}, pe}, nil
 }
 
 // unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
@@ -263,28 +217,6 @@ func unsafeStringPtr(str string) (unsafe.Pointer, error) {
 	return unsafe.Pointer(p), nil
 }
 
-// getTraceEventID reads a trace event's ID from tracefs given its group and name.
-// The kernel requires group and name to be alphanumeric or underscore.
-//
-// name automatically has its invalid symbols converted to underscores so the caller
-// can pass a raw symbol name, e.g. a kernel symbol containing dots.
-func getTraceEventID(group, name string) (uint64, error) {
-	name = sanitizeSymbol(name)
-	path, err := sanitizePath(tracefsPath, "events", group, name, "id")
-	if err != nil {
-		return 0, err
-	}
-	tid, err := readUint64FromFile("%d\n", path)
-	if errors.Is(err, os.ErrNotExist) {
-		return 0, err
-	}
-	if err != nil {
-		return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
-	}
-
-	return tid, nil
-}
-
 // openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
 // [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints
 // behind the scenes, and can be attached to using these perf events.
@@ -305,77 +237,6 @@ func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) {
 	return sys.NewFD(fd)
 }
 
-func sanitizePath(base string, path ...string) (string, error) {
-	l := filepath.Join(path...)
-	p := filepath.Join(base, l)
-	if !strings.HasPrefix(p, base) {
-		return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
-	}
-	return p, nil
-}
-
-// readUint64FromFile reads a uint64 from a file.
-//
-// format specifies the contents of the file in fmt.Scanf syntax.
-func readUint64FromFile(format string, path ...string) (uint64, error) {
-	filename := filepath.Join(path...)
-	data, err := os.ReadFile(filename)
-	if err != nil {
-		return 0, fmt.Errorf("reading file %q: %w", filename, err)
-	}
-
-	var value uint64
-	n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
-	if err != nil {
-		return 0, fmt.Errorf("parsing file %q: %w", filename, err)
-	}
-	if n != 1 {
-		return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
-	}
-
-	return value, nil
-}
-
-type uint64FromFileKey struct {
-	format, path string
-}
-
-var uint64FromFileCache = struct {
-	sync.RWMutex
-	values map[uint64FromFileKey]uint64
-}{
-	values: map[uint64FromFileKey]uint64{},
-}
-
-// readUint64FromFileOnce is like readUint64FromFile but memoizes the result.
-func readUint64FromFileOnce(format string, path ...string) (uint64, error) {
-	filename := filepath.Join(path...)
-	key := uint64FromFileKey{format, filename}
-
-	uint64FromFileCache.RLock()
-	if value, ok := uint64FromFileCache.values[key]; ok {
-		uint64FromFileCache.RUnlock()
-		return value, nil
-	}
-	uint64FromFileCache.RUnlock()
-
-	value, err := readUint64FromFile(format, filename)
-	if err != nil {
-		return 0, err
-	}
-
-	uint64FromFileCache.Lock()
-	defer uint64FromFileCache.Unlock()
-
-	if value, ok := uint64FromFileCache.values[key]; ok {
-		// Someone else got here before us, use what is cached.
-		return value, nil
-	}
-
-	uint64FromFileCache.values[key] = value
-	return value, nil
-}
-
 // Probe BPF perf link.
 //
 // https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
@@ -407,28 +268,3 @@ var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15"
 	}
 	return err
 })
-
-// isValidTraceID implements the equivalent of a regex match
-// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
-//
-// Trace event groups, names and kernel symbols must adhere to this set
-// of characters. Non-empty, first character must not be a number, all
-// characters must be alphanumeric or underscore.
-func isValidTraceID(s string) bool {
-	if len(s) < 1 {
-		return false
-	}
-	for i, c := range []byte(s) {
-		switch {
-		case c >= 'a' && c <= 'z':
-		case c >= 'A' && c <= 'Z':
-		case c == '_':
-		case i > 0 && c >= '0' && c <= '9':
-
-		default:
-			return false
-		}
-	}
-
-	return true
-}
diff --git a/vendor/github.com/cilium/ebpf/link/platform.go b/vendor/github.com/cilium/ebpf/link/platform.go
deleted file mode 100644
index eb6f7b7a..00000000
--- a/vendor/github.com/cilium/ebpf/link/platform.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package link
-
-import (
-	"fmt"
-	"runtime"
-)
-
-func platformPrefix(symbol string) string {
-
-	prefix := runtime.GOARCH
-
-	// per https://github.com/golang/go/blob/master/src/go/build/syslist.go
-	switch prefix {
-	case "386":
-		prefix = "ia32"
-	case "amd64", "amd64p32":
-		prefix = "x64"
-	case "arm64", "arm64be":
-		prefix = "arm64"
-	default:
-		return symbol
-	}
-
-	return fmt.Sprintf("__%s_%s", prefix, symbol)
-}
diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go
index 8c882414..c0565651 100644
--- a/vendor/github.com/cilium/ebpf/link/query.go
+++ b/vendor/github.com/cilium/ebpf/link/query.go
@@ -21,9 +21,9 @@ type QueryOptions struct {
 
 // QueryPrograms retrieves ProgramIDs associated with the AttachType.
 //
-// It only returns IDs of programs that were attached using PROG_ATTACH and not bpf_link.
-// Returns (nil, nil) if there are no programs attached to the queried kernel resource.
-// Calling QueryPrograms on a kernel missing PROG_QUERY will result in ErrNotSupported.
+// Returns (nil, nil) if there are no programs attached to the queried kernel
+// resource. Calling QueryPrograms on a kernel missing PROG_QUERY will result in
+// ErrNotSupported.
 func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) {
 	if haveProgQuery() != nil {
 		return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported)
diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go
index 94f3958c..84f0b656 100644
--- a/vendor/github.com/cilium/ebpf/link/socket_filter.go
+++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go
@@ -15,7 +15,7 @@ func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error {
 	}
 	var ssoErr error
 	err = rawConn.Control(func(fd uintptr) {
-		ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
+		ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
 	})
 	if ssoErr != nil {
 		return ssoErr
@@ -31,7 +31,7 @@ func DetachSocketFilter(conn syscall.Conn) error {
 	}
 	var ssoErr error
 	err = rawConn.Control(func(fd uintptr) {
-		ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
+		ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
 	})
 	if ssoErr != nil {
 		return ssoErr
diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go
index 38f7ae9b..c9c998c2 100644
--- a/vendor/github.com/cilium/ebpf/link/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/link/syscalls.go
@@ -46,7 +46,7 @@ var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() e
 	return nil
 })
 
-var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error {
+var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", "5.5", func() error {
 	if err := haveProgAttach(); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go
index a59ef9d1..95f5fae3 100644
--- a/vendor/github.com/cilium/ebpf/link/tracepoint.go
+++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 
 	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/tracefs"
 )
 
 // TracepointOptions defines additional parameters that will be used
@@ -17,7 +18,7 @@ type TracepointOptions struct {
 }
 
 // Tracepoint attaches the given eBPF program to the tracepoint with the given
-// group and name. See /sys/kernel/debug/tracing/events to find available
+// group and name. See /sys/kernel/tracing/events to find available
 // tracepoints. The top-level directory is the group, the event's subdirectory
 // is the name. Example:
 //
@@ -36,14 +37,11 @@ func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions)
 	if prog == nil {
 		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
 	}
-	if !isValidTraceID(group) || !isValidTraceID(name) {
-		return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput)
-	}
 	if prog.Type() != ebpf.TracePoint {
 		return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput)
 	}
 
-	tid, err := getTraceEventID(group, name)
+	tid, err := tracefs.EventID(group, name)
 	if err != nil {
 		return nil, err
 	}
@@ -58,16 +56,9 @@ func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions)
 		cookie = opts.Cookie
 	}
 
-	pe := &perfEvent{
-		typ:       tracepointEvent,
-		group:     group,
-		name:      name,
-		tracefsID: tid,
-		cookie:    cookie,
-		fd:        fd,
-	}
+	pe := newPerfEvent(fd, nil)
 
-	lnk, err := attachPerfEvent(pe, prog)
+	lnk, err := attachPerfEvent(pe, prog, cookie)
 	if err != nil {
 		pe.Close()
 		return nil, err
diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go
index e26cc914..1e1a7834 100644
--- a/vendor/github.com/cilium/ebpf/link/tracing.go
+++ b/vendor/github.com/cilium/ebpf/link/tracing.go
@@ -7,6 +7,7 @@ import (
 	"github.com/cilium/ebpf"
 	"github.com/cilium/ebpf/btf"
 	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
 )
 
 type tracing struct {
@@ -48,7 +49,7 @@ func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (
 		}
 		defer btfHandle.Close()
 
-		spec, err := btfHandle.Spec()
+		spec, err := btfHandle.Spec(nil)
 		if err != nil {
 			return nil, err
 		}
@@ -87,29 +88,71 @@ type TracingOptions struct {
 	// AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or
 	// AttachTraceRawTp.
 	Program *ebpf.Program
+	// Program attach type. Can be one of:
+	// 	- AttachTraceFEntry
+	// 	- AttachTraceFExit
+	// 	- AttachModifyReturn
+	// 	- AttachTraceRawTp
+	// This field is optional.
+	AttachType ebpf.AttachType
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	Cookie uint64
 }
 
 type LSMOptions struct {
 	// Program must be of type LSM with attach type
 	// AttachLSMMac.
 	Program *ebpf.Program
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	Cookie uint64
 }
 
 // attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id.
-func attachBTFID(program *ebpf.Program) (Link, error) {
+func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) {
 	if program.FD() < 0 {
 		return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd)
 	}
 
-	fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
-		ProgFd: uint32(program.FD()),
-	})
-	if errors.Is(err, sys.ENOTSUPP) {
-		// This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
-		return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
-	}
-	if err != nil {
-		return nil, fmt.Errorf("create raw tracepoint: %w", err)
+	var (
+		fd  *sys.FD
+		err error
+	)
+	switch at {
+	case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp,
+		ebpf.AttachModifyReturn, ebpf.AttachLSMMac:
+		// Attach via BPF link
+		fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{
+			ProgFd:     uint32(program.FD()),
+			AttachType: sys.AttachType(at),
+			Cookie:     cookie,
+		})
+		if err == nil {
+			break
+		}
+		if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) {
+			return nil, fmt.Errorf("create tracing link: %w", err)
+		}
+		fallthrough
+	case ebpf.AttachNone:
+		// Attach via RawTracepointOpen
+		if cookie > 0 {
+			return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported)
+		}
+
+		fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+			ProgFd: uint32(program.FD()),
+		})
+		if errors.Is(err, sys.ENOTSUPP) {
+			// This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+			return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("create raw tracepoint: %w", err)
+		}
+	default:
+		return nil, fmt.Errorf("invalid attach type: %s", at.String())
 	}
 
 	raw := RawLink{fd: fd}
@@ -124,8 +167,7 @@ func attachBTFID(program *ebpf.Program) (Link, error) {
 		// a raw_tracepoint link. Other types return a tracing link.
 		return &rawTracepoint{raw}, nil
 	}
-
-	return &tracing{RawLink: RawLink{fd: fd}}, nil
+	return &tracing{raw}, nil
 }
 
 // AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or
@@ -136,7 +178,14 @@ func AttachTracing(opts TracingOptions) (Link, error) {
 		return nil, fmt.Errorf("invalid program type %s, expected Tracing", t)
 	}
 
-	return attachBTFID(opts.Program)
+	switch opts.AttachType {
+	case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn,
+		ebpf.AttachTraceRawTp, ebpf.AttachNone:
+	default:
+		return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String())
+	}
+
+	return attachBTFID(opts.Program, opts.AttachType, opts.Cookie)
 }
 
 // AttachLSM links a Linux security module (LSM) BPF Program to a BPF
@@ -146,5 +195,5 @@ func AttachLSM(opts LSMOptions) (Link, error) {
 		return nil, fmt.Errorf("invalid program type %s, expected LSM", t)
 	}
 
-	return attachBTFID(opts.Program)
+	return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie)
 }
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go
index aa1ad9bb..272bac41 100644
--- a/vendor/github.com/cilium/ebpf/link/uprobe.go
+++ b/vendor/github.com/cilium/ebpf/link/uprobe.go
@@ -5,16 +5,14 @@ import (
 	"errors"
 	"fmt"
 	"os"
-	"path/filepath"
-	"strings"
+	"sync"
 
 	"github.com/cilium/ebpf"
 	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/tracefs"
 )
 
 var (
-	uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events")
-
 	uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
 	// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
 	uprobeRefCtrOffsetShift = 32
@@ -37,6 +35,8 @@ type Executable struct {
 	path string
 	// Parsed ELF and dynamic symbols' addresses.
 	addresses map[string]uint64
+	// Guards the lazy, one-time load of the symbol table.
+	addressesOnce sync.Once
 }
 
 // UprobeOptions defines additional parameters that will be used
@@ -70,6 +70,17 @@ type UprobeOptions struct {
 	//
 	// Needs kernel 5.15+.
 	Cookie uint64
+	// Prefix used for the event name if the uprobe must be attached using tracefs.
+	// The group name will be formatted as `<prefix>_<randomstr>`.
+	// The default empty string is equivalent to "ebpf" as the prefix.
+	TraceFSPrefix string
+}
+
+func (uo *UprobeOptions) cookie() uint64 {
+	if uo == nil {
+		return 0
+	}
+	return uo.Cookie
 }
 
 // To open a new Executable, use:
@@ -82,32 +93,21 @@ func OpenExecutable(path string) (*Executable, error) {
 		return nil, fmt.Errorf("path cannot be empty")
 	}
 
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, fmt.Errorf("open file '%s': %w", path, err)
-	}
-	defer f.Close()
-
-	se, err := internal.NewSafeELFFile(f)
+	f, err := internal.OpenSafeELFFile(path)
 	if err != nil {
 		return nil, fmt.Errorf("parse ELF file: %w", err)
 	}
+	defer f.Close()
 
-	if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN {
+	if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN {
 		// ELF is not an executable or a shared object.
 		return nil, errors.New("the given file is not an executable or a shared object")
 	}
 
-	ex := Executable{
+	return &Executable{
 		path:      path,
 		addresses: make(map[string]uint64),
-	}
-
-	if err := ex.load(se); err != nil {
-		return nil, err
-	}
-
-	return &ex, nil
+	}, nil
 }
 
 func (ex *Executable) load(f *internal.SafeELFFile) error {
@@ -164,6 +164,22 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error
 		return opts.Address + opts.Offset, nil
 	}
 
+	var err error
+	ex.addressesOnce.Do(func() {
+		var f *internal.SafeELFFile
+		f, err = internal.OpenSafeELFFile(ex.path)
+		if err != nil {
+			err = fmt.Errorf("parse ELF file: %w", err)
+			return
+		}
+		defer f.Close()
+
+		err = ex.load(f)
+	})
+	if err != nil {
+		return 0, fmt.Errorf("lazy load symbols: %w", err)
+	}
+
 	address, ok := ex.addresses[symbol]
 	if !ok {
 		return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
@@ -209,7 +225,7 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
 		return nil, err
 	}
 
-	lnk, err := attachPerfEvent(u, prog)
+	lnk, err := attachPerfEvent(u, prog, opts.cookie())
 	if err != nil {
 		u.Close()
 		return nil, err
@@ -243,7 +259,7 @@ func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeO
 		return nil, err
 	}
 
-	lnk, err := attachPerfEvent(u, prog)
+	lnk, err := attachPerfEvent(u, prog, opts.cookie())
 	if err != nil {
 		u.Close()
 		return nil, err
@@ -281,18 +297,20 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
 		}
 	}
 
-	args := probeArgs{
-		symbol:       symbol,
-		path:         ex.path,
-		offset:       offset,
-		pid:          pid,
-		refCtrOffset: opts.RefCtrOffset,
-		ret:          ret,
-		cookie:       opts.Cookie,
+	args := tracefs.ProbeArgs{
+		Type:         tracefs.Uprobe,
+		Symbol:       symbol,
+		Path:         ex.path,
+		Offset:       offset,
+		Pid:          pid,
+		RefCtrOffset: opts.RefCtrOffset,
+		Ret:          ret,
+		Cookie:       opts.Cookie,
+		Group:        opts.TraceFSPrefix,
 	}
 
 	// Use uprobe PMU if the kernel has it available.
-	tp, err := pmuUprobe(args)
+	tp, err := pmuProbe(args)
 	if err == nil {
 		return tp, nil
 	}
@@ -301,59 +319,10 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
 	}
 
 	// Use tracefs if uprobe PMU is missing.
-	args.symbol = sanitizeSymbol(symbol)
-	tp, err = tracefsUprobe(args)
+	tp, err = tracefsProbe(args)
 	if err != nil {
 		return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
 	}
 
 	return tp, nil
 }
-
-// pmuUprobe opens a perf event based on the uprobe PMU.
-func pmuUprobe(args probeArgs) (*perfEvent, error) {
-	return pmuProbe(uprobeType, args)
-}
-
-// tracefsUprobe creates a Uprobe tracefs entry.
-func tracefsUprobe(args probeArgs) (*perfEvent, error) {
-	return tracefsProbe(uprobeType, args)
-}
-
-// sanitizeSymbol replaces every invalid character for the tracefs api with an underscore.
-// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_").
-func sanitizeSymbol(s string) string {
-	var b strings.Builder
-	b.Grow(len(s))
-	var skip bool
-	for _, c := range []byte(s) {
-		switch {
-		case c >= 'a' && c <= 'z',
-			c >= 'A' && c <= 'Z',
-			c >= '0' && c <= '9':
-			skip = false
-			b.WriteByte(c)
-
-		default:
-			if !skip {
-				b.WriteByte('_')
-				skip = true
-			}
-		}
-	}
-
-	return b.String()
-}
-
-// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
-func uprobeToken(args probeArgs) string {
-	po := fmt.Sprintf("%s:%#x", args.path, args.offset)
-
-	if args.refCtrOffset != 0 {
-		// This is not documented in Documentation/trace/uprobetracer.txt.
-		// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
-		po += fmt.Sprintf("(%#x)", args.refCtrOffset)
-	}
-
-	return po
-}
diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go
index d97a44b2..e0dbfcff 100644
--- a/vendor/github.com/cilium/ebpf/linker.go
+++ b/vendor/github.com/cilium/ebpf/linker.go
@@ -4,12 +4,48 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"io"
+	"math"
 
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/btf"
 	"github.com/cilium/ebpf/internal"
 )
 
+// handles stores handle objects to avoid gc cleanup
+type handles []*btf.Handle
+
+func (hs *handles) add(h *btf.Handle) (int, error) {
+	if h == nil {
+		return 0, nil
+	}
+
+	if len(*hs) == math.MaxInt16 {
+		return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16)
+	}
+
+	*hs = append(*hs, h)
+
+	// return length of slice so that indexes start at 1
+	return len(*hs), nil
+}
+
+func (hs handles) fdArray() []int32 {
+	// first element of fda is reserved as no module can be indexed with 0
+	fda := []int32{0}
+	for _, h := range hs {
+		fda = append(fda, int32(h.FD()))
+	}
+
+	return fda
+}
+
+func (hs handles) close() {
+	for _, h := range hs {
+		h.Close()
+	}
+}
+
 // splitSymbols splits insns into subsections delimited by Symbol Instructions.
 // insns cannot be empty and must start with a Symbol Instruction.
 //
@@ -87,14 +123,6 @@ func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOr
 		bo = internal.NativeEndian
 	}
 
-	if target == nil {
-		var err error
-		target, err = btf.LoadKernelSpec()
-		if err != nil {
-			return fmt.Errorf("load kernel spec: %w", err)
-		}
-	}
-
 	fixups, err := btf.CORERelocate(relos, target, bo)
 	if err != nil {
 		return err
@@ -102,7 +130,7 @@ func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOr
 
 	for i, fixup := range fixups {
 		if err := fixup.Apply(reloInsns[i]); err != nil {
-			return fmt.Errorf("apply fixup %s: %w", &fixup, err)
+			return fmt.Errorf("fixup for %s: %w", relos[i], err)
 		}
 	}
 
@@ -189,8 +217,9 @@ func fixupAndValidate(insns asm.Instructions) error {
 		ins := iter.Ins
 
 		// Map load was tagged with a Reference, but does not contain a Map pointer.
-		if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil {
-			return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference)
+		needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil
+		if ins.IsLoadFromMap() && needsMap && ins.Map() == nil {
+			return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference)
 		}
 
 		fixupProbeReadKernel(ins)
@@ -199,6 +228,88 @@ func fixupAndValidate(insns asm.Instructions) error {
 	return nil
 }
 
+// fixupKfuncs loops over all instructions in search of kfunc calls.
+// If at least one is found, the current kernel's BTF and module BTFs are searched to set Instruction.Constant
+// and Instruction.Offset to the correct values.
+func fixupKfuncs(insns asm.Instructions) (handles, error) {
+	iter := insns.Iterate()
+	for iter.Next() {
+		ins := iter.Ins
+		if ins.IsKfuncCall() {
+			goto fixups
+		}
+	}
+
+	return nil, nil
+
+fixups:
+	// only load the kernel spec if we found at least one kfunc call
+	kernelSpec, err := btf.LoadKernelSpec()
+	if err != nil {
+		return nil, err
+	}
+
+	fdArray := make(handles, 0)
+	for {
+		ins := iter.Ins
+
+		if !ins.IsKfuncCall() {
+			if !iter.Next() {
+				// break loop if this was the last instruction in the stream.
+				break
+			}
+			continue
+		}
+
+		// Check the instruction's kfunc metadata; return an error if it is missing.
+		kfm, _ := ins.Metadata.Get(kfuncMeta{}).(*btf.Func)
+		if kfm == nil {
+			return nil, fmt.Errorf("kfunc call has no kfuncMeta")
+		}
+
+		target := btf.Type((*btf.Func)(nil))
+		spec, module, err := findTargetInKernel(kernelSpec, kfm.Name, &target)
+		if errors.Is(err, btf.ErrNotFound) {
+			return nil, fmt.Errorf("kfunc %q: %w", kfm.Name, ErrNotSupported)
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		if err := btf.CheckTypeCompatibility(kfm.Type, target.(*btf.Func).Type); err != nil {
+			return nil, &incompatibleKfuncError{kfm.Name, err}
+		}
+
+		id, err := spec.TypeID(target)
+		if err != nil {
+			return nil, err
+		}
+
+		idx, err := fdArray.add(module)
+		if err != nil {
+			return nil, err
+		}
+
+		ins.Constant = int64(id)
+		ins.Offset = int16(idx)
+
+		if !iter.Next() {
+			break
+		}
+	}
+
+	return fdArray, nil
+}
+
+type incompatibleKfuncError struct {
+	name string
+	err  error
+}
+
+func (ike *incompatibleKfuncError) Error() string {
+	return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err)
+}
+
 // fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str)
 // with bpf_probe_read(_str) on kernels that don't support it yet.
 func fixupProbeReadKernel(ins *asm.Instruction) {
@@ -218,3 +329,63 @@ func fixupProbeReadKernel(ins *asm.Instruction) {
 		ins.Constant = int64(asm.FnProbeReadStr)
 	}
 }
+
+// resolveKconfigReferences creates and populates a .kconfig map if necessary.
+//
+// Returns a nil Map and no error if no references exist.
+func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) {
+	closeOnError := func(c io.Closer) {
+		if err != nil {
+			c.Close()
+		}
+	}
+
+	var spec *MapSpec
+	iter := insns.Iterate()
+	for iter.Next() {
+		meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+		if meta != nil {
+			spec = meta.Map
+			break
+		}
+	}
+
+	if spec == nil {
+		return nil, nil
+	}
+
+	cpy := spec.Copy()
+	if err := resolveKconfig(cpy); err != nil {
+		return nil, err
+	}
+
+	kconfig, err := NewMap(cpy)
+	if err != nil {
+		return nil, err
+	}
+	defer closeOnError(kconfig)
+
+	// Resolve all instructions which load from .kconfig map with actual map
+	// and offset inside it.
+	iter = insns.Iterate()
+	for iter.Next() {
+		meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+		if meta == nil {
+			continue
+		}
+
+		if meta.Map != spec {
+			return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index)
+		}
+
+		if err := iter.Ins.AssociateMap(kconfig); err != nil {
+			return nil, fmt.Errorf("instruction %d: %w", iter.Index, err)
+		}
+
+		// Encode a map read at the offset of the var in the datasec.
+		iter.Ins.Constant = int64(uint64(meta.Offset) << 32)
+		iter.Ins.Metadata.Set(kconfigMetaKey{}, nil)
+	}
+
+	return kconfig, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go
index 800d59da..a11664cc 100644
--- a/vendor/github.com/cilium/ebpf/map.go
+++ b/vendor/github.com/cilium/ebpf/map.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"math/rand"
+	"os"
 	"path/filepath"
 	"reflect"
 	"time"
@@ -168,7 +169,10 @@ func (ms *MapSpec) Compatible(m *Map) error {
 		m.maxEntries != ms.MaxEntries:
 		return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible)
 
-	case m.flags != ms.Flags:
+	// BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow
+	// this mismatch.
+	case !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) &&
+		m.flags != ms.Flags:
 		return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible)
 	}
 	return nil
@@ -430,8 +434,8 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err erro
 
 			// Use BTF k/v during map creation.
 			attr.BtfFd = uint32(handle.FD())
-			attr.BtfKeyTypeId = uint32(keyTypeID)
-			attr.BtfValueTypeId = uint32(valueTypeID)
+			attr.BtfKeyTypeId = keyTypeID
+			attr.BtfValueTypeId = valueTypeID
 		}
 	}
 
@@ -450,6 +454,9 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err erro
 		if errors.Is(err, unix.EINVAL) && attr.MaxEntries == 0 {
 			return nil, fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err)
 		}
+		if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap {
+			return nil, fmt.Errorf("map create: cannot use type %s", UnspecifiedMap)
+		}
 		if attr.BtfFd == 0 {
 			return nil, fmt.Errorf("map create: %w (without BTF k/v)", err)
 		}
@@ -489,7 +496,7 @@ func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries
 		return nil, err
 	}
 
-	m.fullValueSize = internal.Align(int(valueSize), 8) * possibleCPUs
+	m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs
 	return m, nil
 }
 
@@ -543,12 +550,7 @@ const LookupLock MapLookupFlags = 4
 //
 // Returns an error if the key doesn't exist, see ErrKeyNotExist.
 func (m *Map) Lookup(key, valueOut interface{}) error {
-	valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
-	if err := m.lookup(key, valuePtr, 0); err != nil {
-		return err
-	}
-
-	return m.unmarshalValue(valueOut, valueBytes)
+	return m.LookupWithFlags(key, valueOut, 0)
 }
 
 // LookupWithFlags retrieves a value from a Map with flags.
@@ -562,6 +564,10 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
 //
 // Returns an error if the key doesn't exist, see ErrKeyNotExist.
 func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
+	if m.typ.hasPerCPUValue() {
+		return m.lookupPerCPU(key, valueOut, flags)
+	}
+
 	valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
 	if err := m.lookup(key, valuePtr, flags); err != nil {
 		return err
@@ -574,7 +580,7 @@ func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) e
 //
 // Returns ErrKeyNotExist if the key doesn't exist.
 func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
-	return m.lookupAndDelete(key, valueOut, 0)
+	return m.LookupAndDeleteWithFlags(key, valueOut, 0)
 }
 
 // LookupAndDeleteWithFlags retrieves and deletes a value from a Map.
@@ -585,7 +591,15 @@ func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
 //
 // Returns ErrKeyNotExist if the key doesn't exist.
 func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
-	return m.lookupAndDelete(key, valueOut, flags)
+	if m.typ.hasPerCPUValue() {
+		return m.lookupAndDeletePerCPU(key, valueOut, flags)
+	}
+
+	valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
+	if err := m.lookupAndDelete(key, valuePtr, flags); err != nil {
+		return err
+	}
+	return m.unmarshalValue(valueOut, valueBytes)
 }
 
 // LookupBytes gets a value from Map.
@@ -603,6 +617,14 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
 	return valueBytes, err
 }
 
+func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error {
+	valueBytes := make([]byte, m.fullValueSize)
+	if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+		return err
+	}
+	return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+}
+
 func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error {
 	keyPtr, err := m.marshalKey(key)
 	if err != nil {
@@ -622,9 +644,15 @@ func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags
 	return nil
 }
 
-func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) error {
-	valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
+func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error {
+	valueBytes := make([]byte, m.fullValueSize)
+	if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+		return err
+	}
+	return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+}
 
+func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error {
 	keyPtr, err := m.marshalKey(key)
 	if err != nil {
 		return fmt.Errorf("can't marshal key: %w", err)
@@ -641,7 +669,7 @@ func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) e
 		return fmt.Errorf("lookup and delete: %w", wrapMapError(err))
 	}
 
-	return m.unmarshalValue(valueOut, valueBytes)
+	return nil
 }
 
 // MapUpdateFlags controls the behaviour of the Map.Update call.
@@ -668,15 +696,32 @@ func (m *Map) Put(key, value interface{}) error {
 }
 
 // Update changes the value of a key.
-func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
-	keyPtr, err := m.marshalKey(key)
-	if err != nil {
-		return fmt.Errorf("can't marshal key: %w", err)
+func (m *Map) Update(key, value any, flags MapUpdateFlags) error {
+	if m.typ.hasPerCPUValue() {
+		return m.updatePerCPU(key, value, flags)
 	}
 
 	valuePtr, err := m.marshalValue(value)
 	if err != nil {
-		return fmt.Errorf("can't marshal value: %w", err)
+		return fmt.Errorf("marshal value: %w", err)
+	}
+
+	return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error {
+	valuePtr, err := marshalPerCPUValue(value, int(m.valueSize))
+	if err != nil {
+		return fmt.Errorf("marshal value: %w", err)
+	}
+
+	return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error {
+	keyPtr, err := m.marshalKey(key)
+	if err != nil {
+		return fmt.Errorf("marshal key: %w", err)
 	}
 
 	attr := sys.MapUpdateElemAttr{
@@ -792,12 +837,22 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error {
 	return nil
 }
 
+var mmapProtectedPage = internal.Memoize(func() ([]byte, error) {
+	return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED)
+})
+
 // guessNonExistentKey attempts to perform a map lookup that returns ENOENT.
 // This is necessary on kernels before 4.4.132, since those don't support
 // iterating maps from the start by providing an invalid key pointer.
 func (m *Map) guessNonExistentKey() ([]byte, error) {
-	// Provide an invalid value pointer to prevent a copy on the kernel side.
-	valuePtr := sys.NewPointer(unsafe.Pointer(^uintptr(0)))
+	// Map a protected page and use that as the value pointer. This saves some
+	// work copying out the value, which we're not interested in.
+	page, err := mmapProtectedPage()
+	if err != nil {
+		return nil, err
+	}
+	valuePtr := sys.NewSlicePointer(page)
+
 	randKey := make([]byte, int(m.keySize))
 
 	for i := 0; i < 4; i++ {
@@ -1090,7 +1145,7 @@ func (m *Map) Clone() (*Map, error) {
 // You can Clone a map to pin it to a different path.
 //
 // This requires bpffs to be mounted above fileName.
-// See https://docs.cilium.io/en/stable/concepts/kubernetes/configuration/#mounting-bpffs-with-systemd
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
 func (m *Map) Pin(fileName string) error {
 	if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
 		return err
@@ -1175,10 +1230,6 @@ func (m *Map) unmarshalKey(data interface{}, buf []byte) error {
 }
 
 func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) {
-	if m.typ.hasPerCPUValue() {
-		return marshalPerCPUValue(data, int(m.valueSize))
-	}
-
 	var (
 		buf []byte
 		err error
@@ -1311,8 +1362,7 @@ func marshalMap(m *Map, length int) ([]byte, error) {
 // See Map.Iterate.
 type MapIterator struct {
 	target            *Map
-	prevKey           interface{}
-	prevBytes         []byte
+	curKey            []byte
 	count, maxEntries uint32
 	done              bool
 	err               error
@@ -1322,7 +1372,6 @@ func newMapIterator(target *Map) *MapIterator {
 	return &MapIterator{
 		target:     target,
 		maxEntries: target.maxEntries,
-		prevBytes:  make([]byte, target.keySize),
 	}
 }
 
@@ -1344,26 +1393,35 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
 	// For array-like maps NextKeyBytes returns nil only on after maxEntries
 	// iterations.
 	for mi.count <= mi.maxEntries {
-		var nextBytes []byte
-		nextBytes, mi.err = mi.target.NextKeyBytes(mi.prevKey)
+		var nextKey []byte
+		if mi.curKey == nil {
+			// Pass nil interface to NextKeyBytes to make sure the Map's first key
+			// is returned. If we pass an uninitialized []byte instead, it'll see a
+			// non-nil interface and try to marshal it.
+			nextKey, mi.err = mi.target.NextKeyBytes(nil)
+
+			mi.curKey = make([]byte, mi.target.keySize)
+		} else {
+			nextKey, mi.err = mi.target.NextKeyBytes(mi.curKey)
+		}
 		if mi.err != nil {
+			mi.err = fmt.Errorf("get next key: %w", mi.err)
 			return false
 		}
 
-		if nextBytes == nil {
+		if nextKey == nil {
 			mi.done = true
 			return false
 		}
 
-		// The user can get access to nextBytes since unmarshalBytes
+		// The user can get access to nextKey since unmarshalBytes
 		// does not copy when unmarshaling into a []byte.
 		// Make a copy to prevent accidental corruption of
 		// iterator state.
-		copy(mi.prevBytes, nextBytes)
-		mi.prevKey = mi.prevBytes
+		copy(mi.curKey, nextKey)
 
 		mi.count++
-		mi.err = mi.target.Lookup(nextBytes, valueOut)
+		mi.err = mi.target.Lookup(nextKey, valueOut)
 		if errors.Is(mi.err, ErrKeyNotExist) {
 			// Even though the key should be valid, we couldn't look up
 			// its value. If we're iterating a hash map this is probably
@@ -1376,10 +1434,11 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
 			continue
 		}
 		if mi.err != nil {
+			mi.err = fmt.Errorf("look up next key: %w", mi.err)
 			return false
 		}
 
-		mi.err = mi.target.unmarshalKey(keyOut, nextBytes)
+		mi.err = mi.target.unmarshalKey(keyOut, nextKey)
 		return mi.err == nil
 	}
 
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
index 544d17f3..a568bff9 100644
--- a/vendor/github.com/cilium/ebpf/marshalers.go
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -57,8 +57,10 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
 	case Map, *Map, Program, *Program:
 		err = fmt.Errorf("can't marshal %T", value)
 	default:
-		var wr bytes.Buffer
-		err = binary.Write(&wr, internal.NativeEndian, value)
+		wr := internal.NewBuffer(make([]byte, 0, length))
+		defer internal.PutBuffer(wr)
+
+		err = binary.Write(wr, internal.NativeEndian, value)
 		if err != nil {
 			err = fmt.Errorf("encoding %T: %v", value, err)
 		}
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
index dbc25e4f..70aaef55 100644
--- a/vendor/github.com/cilium/ebpf/prog.go
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -10,6 +10,7 @@ import (
 	"runtime"
 	"strings"
 	"time"
+	"unsafe"
 
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/btf"
@@ -169,6 +170,9 @@ type Program struct {
 // NewProgram creates a new Program.
 //
 // See [NewProgramWithOptions] for details.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
 func NewProgram(spec *ProgramSpec) (*Program, error) {
 	return NewProgramWithOptions(spec, ProgramOptions{})
 }
@@ -178,7 +182,8 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
 // Loading a program for the first time will perform
 // feature detection by loading small, temporary programs.
 //
-// Returns a [VerifierError] if the program is rejected by the kernel.
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
 func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
 	if spec == nil {
 		return nil, errors.New("can't load a program from a nil spec")
@@ -258,10 +263,27 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
 		return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
 	}
 
+	kconfig, err := resolveKconfigReferences(insns)
+	if err != nil {
+		return nil, fmt.Errorf("resolve .kconfig: %w", err)
+	}
+	defer kconfig.Close()
+
 	if err := fixupAndValidate(insns); err != nil {
 		return nil, err
 	}
 
+	handles, err := fixupKfuncs(insns)
+	if err != nil {
+		return nil, fmt.Errorf("fixing up kfuncs: %w", err)
+	}
+	defer handles.close()
+
+	if len(handles) > 0 {
+		fdArray := handles.fdArray()
+		attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0]))
+	}
+
 	buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
 	err = insns.Marshal(buf, internal.NativeEndian)
 	if err != nil {
@@ -278,18 +300,18 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
 			return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
 		}
 
-		attr.AttachBtfId = uint32(targetID)
+		attr.AttachBtfId = targetID
 		attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD())
 		defer runtime.KeepAlive(spec.AttachTarget)
 	} else if spec.AttachTo != "" {
-		module, targetID, err := findTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType)
+		module, targetID, err := findProgramTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType)
 		if err != nil && !errors.Is(err, errUnrecognizedAttachType) {
 			// We ignore errUnrecognizedAttachType since AttachTo may be non-empty
 			// for programs that don't attach anywhere.
 			return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
 		}
 
-		attr.AttachBtfId = uint32(targetID)
+		attr.AttachBtfId = targetID
 		if module != nil {
 			attr.AttachBtfObjFd = uint32(module.FD())
 			defer module.Close()
@@ -462,7 +484,7 @@ func (p *Program) Clone() (*Program, error) {
 // the new path already exists. Re-pinning across filesystems is not supported.
 //
 // This requires bpffs to be mounted above fileName.
-// See https://docs.cilium.io/en/stable/concepts/kubernetes/configuration/#mounting-bpffs-with-systemd
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
 func (p *Program) Pin(fileName string) error {
 	if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil {
 		return err
@@ -579,9 +601,6 @@ func (p *Program) Run(opts *RunOptions) (uint32, error) {
 // run or an error. reset is called whenever the benchmark syscall is
 // interrupted, and should be set to testing.B.ResetTimer or similar.
 //
-// Note: profiling a call to this function will skew its results, see
-// https://github.com/cilium/ebpf/issues/24
-//
 // This function requires at least Linux 4.12.
 func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
 	if uint(repeat) > math.MaxUint32 {
@@ -783,7 +802,14 @@ func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error)
 		return nil, fmt.Errorf("info for %s: %w", fileName, err)
 	}
 
-	return &Program{"", fd, filepath.Base(fileName), fileName, info.Type}, nil
+	var progName string
+	if haveObjName() == nil {
+		progName = info.Name
+	} else {
+		progName = filepath.Base(fileName)
+	}
+
+	return &Program{"", fd, progName, fileName, info.Type}, nil
 }
 
 // SanitizeName replaces all invalid characters in name with replacement.
@@ -835,7 +861,7 @@ var errUnrecognizedAttachType = errors.New("unrecognized attach type")
 //
 // Returns errUnrecognizedAttachType if the combination of progType and attachType
 // is not recognised.
-func findTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) {
+func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) {
 	type match struct {
 		p ProgramType
 		a AttachType
@@ -880,16 +906,9 @@ func findTargetInKernel(name string, progType ProgramType, attachType AttachType
 		return nil, 0, fmt.Errorf("load kernel spec: %w", err)
 	}
 
-	err = spec.TypeByName(typeName, &target)
+	spec, module, err := findTargetInKernel(spec, typeName, &target)
 	if errors.Is(err, btf.ErrNotFound) {
-		module, id, err := findTargetInModule(typeName, target)
-		if errors.Is(err, btf.ErrNotFound) {
-			return nil, 0, &internal.UnsupportedFeatureError{Name: featureName}
-		}
-		if err != nil {
-			return nil, 0, fmt.Errorf("find target for %s in modules: %w", featureName, err)
-		}
-		return module, id, nil
+		return nil, 0, &internal.UnsupportedFeatureError{Name: featureName}
 	}
 	// See cilium/ebpf#894. Until we can disambiguate between equally-named kernel
 	// symbols, we should explicitly refuse program loads. They will not reliably
@@ -898,57 +917,75 @@ func findTargetInKernel(name string, progType ProgramType, attachType AttachType
 		return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err)
 	}
 	if err != nil {
-		return nil, 0, fmt.Errorf("find target for %s in vmlinux: %w", featureName, err)
+		return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err)
 	}
 
 	id, err := spec.TypeID(target)
-	return nil, id, err
+	return module, id, err
 }
 
-// find an attach target type in a kernel module.
+// findTargetInKernel attempts to find a named type in the current kernel.
 //
-// vmlinux must contain the kernel's types and is used to parse kmod BTF.
+// target will point at the found type after a successful call. Searches both
+// vmlinux and any loaded modules.
+//
+// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound
+// if the type wasn't found at all.
+func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
+	err := kernelSpec.TypeByName(typeName, target)
+	if errors.Is(err, btf.ErrNotFound) {
+		spec, module, err := findTargetInModule(kernelSpec, typeName, target)
+		if err != nil {
+			return nil, nil, fmt.Errorf("find target in modules: %w", err)
+		}
+		return spec, module, nil
+	}
+	if err != nil {
+		return nil, nil, fmt.Errorf("find target in vmlinux: %w", err)
+	}
+	return kernelSpec, nil, err
+}
+
+// findTargetInModule attempts to find a named type in any loaded module.
+//
+// base must contain the kernel's types and is used to parse kmod BTF. Modules
+// are searched in the order they were loaded.
 //
 // Returns btf.ErrNotFound if the target can't be found in any module.
-func findTargetInModule(typeName string, target btf.Type) (*btf.Handle, btf.TypeID, error) {
+func findTargetInModule(base *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
 	it := new(btf.HandleIterator)
 	defer it.Handle.Close()
 
 	for it.Next() {
 		info, err := it.Handle.Info()
 		if err != nil {
-			return nil, 0, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err)
+			return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err)
 		}
 
 		if !info.IsModule() {
 			continue
 		}
 
-		spec, err := it.Handle.Spec()
+		spec, err := it.Handle.Spec(base)
 		if err != nil {
-			return nil, 0, fmt.Errorf("parse types for module %s: %w", info.Name, err)
+			return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err)
 		}
 
-		err = spec.TypeByName(typeName, &target)
+		err = spec.TypeByName(typeName, target)
 		if errors.Is(err, btf.ErrNotFound) {
 			continue
 		}
 		if err != nil {
-			return nil, 0, fmt.Errorf("lookup type in module %s: %w", info.Name, err)
-		}
-
-		id, err := spec.TypeID(target)
-		if err != nil {
-			return nil, 0, fmt.Errorf("lookup type id in module %s: %w", info.Name, err)
+			return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err)
 		}
 
-		return it.Take(), id, nil
+		return spec, it.Take(), nil
 	}
 	if err := it.Err(); err != nil {
-		return nil, 0, fmt.Errorf("iterate modules: %w", err)
+		return nil, nil, fmt.Errorf("iterate modules: %w", err)
 	}
 
-	return nil, 0, btf.ErrNotFound
+	return nil, nil, btf.ErrNotFound
 }
 
 // find an attach target type in a program.
@@ -974,7 +1011,7 @@ func findTargetInProgram(prog *Program, name string, progType ProgramType, attac
 	}
 	defer btfHandle.Close()
 
-	spec, err := btfHandle.Spec()
+	spec, err := btfHandle.Spec(nil)
 	if err != nil {
 		return 0, err
 	}
diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh
index 3507ece3..1d1490ad 100644
--- a/vendor/github.com/cilium/ebpf/run-tests.sh
+++ b/vendor/github.com/cilium/ebpf/run-tests.sh
@@ -6,6 +6,8 @@
 #     $ ./run-tests.sh 5.4
 #     Run a subset of tests:
 #     $ ./run-tests.sh 5.4 ./link
+#     Run using a local kernel image
+#     $ ./run-tests.sh /path/to/bzImage
 
 set -euo pipefail
 
@@ -95,38 +97,45 @@ elif [[ "${1:-}" = "--exec-test" ]]; then
   exit $rc # this return code is "swallowed" by qemu
 fi
 
-readonly kernel_version="${1:-}"
-if [[ -z "${kernel_version}" ]]; then
-  echo "Expecting kernel version as first argument"
+if [[ -z "${1:-}" ]]; then
+  echo "Expecting kernel version or path as first argument"
   exit 1
 fi
-shift
 
-readonly kernel="linux-${kernel_version}.bz"
-readonly selftests="linux-${kernel_version}-selftests-bpf.tgz"
 readonly input="$(mktemp -d)"
 readonly tmp_dir="${TMPDIR:-/tmp}"
-readonly branch="${BRANCH:-master}"
 
 fetch() {
     echo Fetching "${1}"
     pushd "${tmp_dir}" > /dev/null
-    curl --no-progress-meter -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}"
+    curl --no-progress-meter -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${BRANCH:-master}/${1}"
     local ret=$?
     popd > /dev/null
     return $ret
 }
 
-fetch "${kernel}"
-cp "${tmp_dir}/${kernel}" "${input}/bzImage"
-
-if fetch "${selftests}"; then
-  echo "Decompressing selftests"
-  mkdir "${input}/bpf"
-  tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
+if [[ -f "${1}" ]]; then
+  readonly kernel="${1}"
+  cp "${1}" "${input}/bzImage"
 else
-  echo "No selftests found, disabling"
+# LINUX_VERSION_CODE test compares this to discovered value.
+  export KERNEL_VERSION="${1}"
+
+  readonly kernel="linux-${1}.bz"
+  readonly selftests="linux-${1}-selftests-bpf.tgz"
+
+  fetch "${kernel}"
+  cp "${tmp_dir}/${kernel}" "${input}/bzImage"
+
+  if fetch "${selftests}"; then
+    echo "Decompressing selftests"
+    mkdir "${input}/bpf"
+    tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
+  else
+    echo "No selftests found, disabling"
+  fi
 fi
+shift
 
 args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...)
 if (( $# > 0 )); then
@@ -135,11 +144,9 @@ fi
 
 export GOFLAGS=-mod=readonly
 export CGO_ENABLED=0
-# LINUX_VERSION_CODE test compares this to discovered value.
-export KERNEL_VERSION="${kernel_version}"
 
-echo Testing on "${kernel_version}"
+echo Testing on "${kernel}"
 go test -exec "$script --exec-vm $input" "${args[@]}"
-echo "Test successful on ${kernel_version}"
+echo "Test successful on ${kernel}"
 
 rm -r "${input}"
diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go
index 1ab13363..fd21dea2 100644
--- a/vendor/github.com/cilium/ebpf/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/syscalls.go
@@ -4,13 +4,25 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"os"
+	"runtime"
 
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/internal"
 	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/tracefs"
 	"github.com/cilium/ebpf/internal/unix"
 )
 
+var (
+	// pre-allocating these here since they may
+	// get called in hot code paths and cause
+	// unnecessary memory allocations
+	sysErrKeyNotExist  = sys.Error(ErrKeyNotExist, unix.ENOENT)
+	sysErrKeyExist     = sys.Error(ErrKeyExist, unix.EEXIST)
+	sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP)
+)
+
 // invalidBPFObjNameChar returns true if char may not appear in
 // a BPF object name.
 func invalidBPFObjNameChar(char rune) bool {
@@ -136,15 +148,15 @@ func wrapMapError(err error) error {
 	}
 
 	if errors.Is(err, unix.ENOENT) {
-		return sys.Error(ErrKeyNotExist, unix.ENOENT)
+		return sysErrKeyNotExist
 	}
 
 	if errors.Is(err, unix.EEXIST) {
-		return sys.Error(ErrKeyExist, unix.EEXIST)
+		return sysErrKeyExist
 	}
 
 	if errors.Is(err, sys.ENOTSUPP) {
-		return sys.Error(ErrNotSupported, sys.ENOTSUPP)
+		return sysErrNotSupported
 	}
 
 	if errors.Is(err, unix.E2BIG) {
@@ -262,3 +274,32 @@ var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func()
 	_ = fd.Close()
 	return nil
 })
+
+var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func() error {
+	prefix := internal.PlatformPrefix()
+	if prefix == "" {
+		return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH)
+	}
+
+	args := tracefs.ProbeArgs{
+		Type:   tracefs.Kprobe,
+		Symbol: prefix + "sys_bpf",
+		Pid:    -1,
+	}
+
+	var err error
+	args.Group, err = tracefs.RandomGroup("ebpf_probe")
+	if err != nil {
+		return err
+	}
+
+	evt, err := tracefs.NewEvent(args)
+	if errors.Is(err, os.ErrNotExist) {
+		return internal.ErrNotSupported
+	}
+	if err != nil {
+		return err
+	}
+
+	return evt.Close()
+})
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 00000000..73309904
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 00000000..2c033dff
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+	~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+	~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+	Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+	~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+	~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+	Integer | Float | ~string
+}
diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go
new file mode 100644
index 00000000..ecc0dabb
--- /dev/null
+++ b/vendor/golang.org/x/exp/maps/maps.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package maps defines various functions useful with maps of any type.
+package maps
+
+// Keys returns the keys of the map m.
+// The keys will be in an indeterminate order.
+func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+	r := make([]K, 0, len(m))
+	for k := range m {
+		r = append(r, k)
+	}
+	return r
+}
+
+// Values returns the values of the map m.
+// The values will be in an indeterminate order.
+func Values[M ~map[K]V, K comparable, V any](m M) []V {
+	r := make([]V, 0, len(m))
+	for _, v := range m {
+		r = append(r, v)
+	}
+	return r
+}
+
+// Equal reports whether two maps contain the same key/value pairs.
+// Values are compared using ==.
+func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
+	if len(m1) != len(m2) {
+		return false
+	}
+	for k, v1 := range m1 {
+		if v2, ok := m2[k]; !ok || v1 != v2 {
+			return false
+		}
+	}
+	return true
+}
+
+// EqualFunc is like Equal, but compares values using eq.
+// Keys are still compared with ==.
+func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
+	if len(m1) != len(m2) {
+		return false
+	}
+	for k, v1 := range m1 {
+		if v2, ok := m2[k]; !ok || !eq(v1, v2) {
+			return false
+		}
+	}
+	return true
+}
+
+// Clear removes all entries from m, leaving it empty.
+func Clear[M ~map[K]V, K comparable, V any](m M) {
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// Clone returns a copy of m.  This is a shallow clone:
+// the new keys and values are set using ordinary assignment.
+func Clone[M ~map[K]V, K comparable, V any](m M) M {
+	// Preserve nil in case it matters.
+	if m == nil {
+		return nil
+	}
+	r := make(M, len(m))
+	for k, v := range m {
+		r[k] = v
+	}
+	return r
+}
+
+// Copy copies all key/value pairs in src adding them to dst.
+// When a key in src is already present in dst,
+// the value in dst will be overwritten by the value associated
+// with the key in src.
+func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
+	for k, v := range src {
+		dst[k] = v
+	}
+}
+
+// DeleteFunc deletes any key/value pairs from m for which del returns true.
+func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
+	for k, v := range m {
+		if del(k, v) {
+			delete(m, k)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 00000000..cff0cd49
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,258 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+// Unless otherwise specified, these functions all apply to the elements
+// of a slice at index 0 <= i < len(s).
+//
+// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
+// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
+// or the sorting may fail to sort correctly. A common case is when sorting slices of
+// floating-point numbers containing NaN values.
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[E comparable](s1, s2 []E) bool {
+	if len(s1) != len(s2) {
+		return false
+	}
+	for i := range s1 {
+		if s1[i] != s2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// EqualFunc reports whether two slices are equal using a comparison
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+	if len(s1) != len(s2) {
+		return false
+	}
+	for i, v1 := range s1 {
+		v2 := s2[i]
+		if !eq(v1, v2) {
+			return false
+		}
+	}
+	return true
+}
+
+// Compare compares the elements of s1 and s2.
+// The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+// Comparisons involving floating point NaNs are ignored.
+func Compare[E constraints.Ordered](s1, s2 []E) int {
+	s2len := len(s2)
+	for i, v1 := range s1 {
+		if i >= s2len {
+			return +1
+		}
+		v2 := s2[i]
+		switch {
+		case v1 < v2:
+			return -1
+		case v1 > v2:
+			return +1
+		}
+	}
+	if len(s1) < s2len {
+		return -1
+	}
+	return 0
+}
+
+// CompareFunc is like Compare but uses a comparison function
+// on each pair of elements. The elements are compared in increasing
+// index order, and the comparisons stop after the first time cmp
+// returns non-zero.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
+	s2len := len(s2)
+	for i, v1 := range s1 {
+		if i >= s2len {
+			return +1
+		}
+		v2 := s2[i]
+		if c := cmp(v1, v2); c != 0 {
+			return c
+		}
+	}
+	if len(s1) < s2len {
+		return -1
+	}
+	return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[E comparable](s []E, v E) int {
+	for i, vs := range s {
+		if v == vs {
+			return i
+		}
+	}
+	return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[E any](s []E, f func(E) bool) int {
+	for i, v := range s {
+		if f(v) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[E comparable](s []E, v E) bool {
+	return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[E any](s []E, f func(E) bool) bool {
+	return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// In the returned slice r, r[i] == v[0].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+	tot := len(s) + len(v)
+	if tot <= cap(s) {
+		s2 := s[:tot]
+		copy(s2[i+len(v):], s[i:])
+		copy(s2[i:], v)
+		return s2
+	}
+	s2 := make(S, tot)
+	copy(s2, s[:i])
+	copy(s2[i:], v)
+	copy(s2[i+len(v):], s[i:])
+	return s2
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete modifies the contents of the slice s; it does not create a new slice.
+// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
+// elements contain pointers you might consider zeroing those elements so that
+// objects they reference can be garbage collected.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+	_ = s[i:j] // bounds check
+
+	return append(s[:i], s[j:]...)
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+	_ = s[i:j] // verify that i:j is a valid subslice
+	tot := len(s[:i]) + len(v) + len(s[j:])
+	if tot <= cap(s) {
+		s2 := s[:tot]
+		copy(s2[i+len(v):], s[j:])
+		copy(s2[i:], v)
+		return s2
+	}
+	s2 := make(S, tot)
+	copy(s2, s[:i])
+	copy(s2[i:], v)
+	copy(s2[i+len(v):], s[j:])
+	return s2
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+	// Preserve nil in case it matters.
+	if s == nil {
+		return nil
+	}
+	return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s; it does not create a new slice.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
+func Compact[S ~[]E, E comparable](s S) S {
+	if len(s) < 2 {
+		return s
+	}
+	i := 1
+	last := s[0]
+	for _, v := range s[1:] {
+		if v != last {
+			s[i] = v
+			i++
+			last = v
+		}
+	}
+	return s[:i]
+}
+
+// CompactFunc is like Compact but uses a comparison function.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+	if len(s) < 2 {
+		return s
+	}
+	i := 1
+	last := s[0]
+	for _, v := range s[1:] {
+		if !eq(v, last) {
+			s[i] = v
+			i++
+			last = v
+		}
+	}
+	return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+	if n < 0 {
+		panic("cannot be negative")
+	}
+	if n -= cap(s) - len(s); n > 0 {
+		// TODO(https://go.dev/issue/53888): Make using []E instead of S
+		// to workaround a compiler bug where the runtime.growslice optimization
+		// does not take effect. Revert when the compiler is fixed.
+		s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+	}
+	return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+	return s[:len(s):len(s)]
+}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 00000000..f14f40da
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+	"math/bits"
+
+	"golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// Sort may fail to sort correctly when sorting slices of floating-point
+// numbers containing Not-a-number (NaN) values.
+// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
+// instead if the input may contain NaNs.
+func Sort[E constraints.Ordered](x []E) {
+	n := len(x)
+	pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the less function.
+// This sort is not guaranteed to be stable.
+//
+// SortFunc requires that less is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[E any](x []E, less func(a, b E) bool) {
+	n := len(x)
+	pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using less to compare elements.
+func SortStableFunc[E any](x []E, less func(a, b E) bool) {
+	stableLessFunc(x, len(x), less)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[E constraints.Ordered](x []E) bool {
+	for i := len(x) - 1; i > 0; i-- {
+		if x[i] < x[i-1] {
+			return false
+		}
+	}
+	return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with less as the
+// comparison function.
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+	for i := len(x) - 1; i > 0; i-- {
+		if less(x[i], x[i-1]) {
+			return false
+		}
+	}
+	return true
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
+	// Inlining is faster than calling BinarySearchFunc with a lambda.
+	n := len(x)
+	// Define x[-1] < target and x[n] >= target.
+	// Invariant: x[i-1] < target, x[j] >= target.
+	i, j := 0, n
+	for i < j {
+		h := int(uint(i+j) >> 1) // avoid overflow when computing h
+		// i ≤ h < j
+		if x[h] < target {
+			i = h + 1 // preserves x[i-1] < target
+		} else {
+			j = h // preserves x[j] >= target
+		}
+	}
+	// i == j, x[i-1] < target, and x[j] (= x[i]) >= target  =>  answer is i.
+	return i, i < n && x[i] == target
+}
+
+// BinarySearchFunc works like BinarySearch, but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing" is
+// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
+// parameters: 0 if a == b, a negative number if a < b and a positive number if
+// a > b.
+func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
+	n := len(x)
+	// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+	// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+	i, j := 0, n
+	for i < j {
+		h := int(uint(i+j) >> 1) // avoid overflow when computing h
+		// i ≤ h < j
+		if cmp(x[h], target) < 0 {
+			i = h + 1 // preserves cmp(x[i - 1], target) < 0
+		} else {
+			j = h // preserves cmp(x[j], target) >= 0
+		}
+	}
+	// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0  =>  answer is i.
+	return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+	unknownHint sortedHint = iota
+	increasingHint
+	decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+	*r ^= *r << 13
+	*r ^= *r >> 17
+	*r ^= *r << 5
+	return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+	return 1 << bits.Len(uint(length))
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go
new file mode 100644
index 00000000..2a632476
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortLessFunc sorts data[a:b] using insertion sort.
+func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+	for i := a + 1; i < b; i++ {
+		for j := i; j > a && less(data[j], data[j-1]); j-- {
+			data[j], data[j-1] = data[j-1], data[j]
+		}
+	}
+}
+
+// siftDownLessFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+	root := lo
+	for {
+		child := 2*root + 1
+		if child >= hi {
+			break
+		}
+		if child+1 < hi && less(data[first+child], data[first+child+1]) {
+			child++
+		}
+		if !less(data[first+root], data[first+child]) {
+			return
+		}
+		data[first+root], data[first+child] = data[first+child], data[first+root]
+		root = child
+	}
+}
+
+func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+	first := a
+	lo := 0
+	hi := b - a
+
+	// Build heap with greatest element at top.
+	for i := (hi - 1) / 2; i >= 0; i-- {
+		siftDownLessFunc(data, i, hi, first, less)
+	}
+
+	// Pop elements, largest first, into end of data.
+	for i := hi - 1; i >= 0; i-- {
+		data[first], data[first+i] = data[first+i], data[first]
+		siftDownLessFunc(data, lo, i, first, less)
+	}
+}
+
+// pdqsortLessFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+	const maxInsertion = 12
+
+	var (
+		wasBalanced    = true // whether the last partitioning was reasonably balanced
+		wasPartitioned = true // whether the slice was already partitioned
+	)
+
+	for {
+		length := b - a
+
+		if length <= maxInsertion {
+			insertionSortLessFunc(data, a, b, less)
+			return
+		}
+
+		// Fall back to heapsort if too many bad choices were made.
+		if limit == 0 {
+			heapSortLessFunc(data, a, b, less)
+			return
+		}
+
+		// If the last partitioning was imbalanced, we need to break patterns.
+		if !wasBalanced {
+			breakPatternsLessFunc(data, a, b, less)
+			limit--
+		}
+
+		pivot, hint := choosePivotLessFunc(data, a, b, less)
+		if hint == decreasingHint {
+			reverseRangeLessFunc(data, a, b, less)
+			// The chosen pivot was pivot-a elements after the start of the array.
+			// After reversing it is pivot-a elements before the end of the array.
+			// The idea came from Rust's implementation.
+			pivot = (b - 1) - (pivot - a)
+			hint = increasingHint
+		}
+
+		// The slice is likely already sorted.
+		if wasBalanced && wasPartitioned && hint == increasingHint {
+			if partialInsertionSortLessFunc(data, a, b, less) {
+				return
+			}
+		}
+
+		// Probably the slice contains many duplicate elements, partition the slice into
+		// elements equal to and elements greater than the pivot.
+		if a > 0 && !less(data[a-1], data[pivot]) {
+			mid := partitionEqualLessFunc(data, a, b, pivot, less)
+			a = mid
+			continue
+		}
+
+		mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+		wasPartitioned = alreadyPartitioned
+
+		leftLen, rightLen := mid-a, b-mid
+		balanceThreshold := length / 8
+		if leftLen < rightLen {
+			wasBalanced = leftLen >= balanceThreshold
+			pdqsortLessFunc(data, a, mid, limit, less)
+			a = mid + 1
+		} else {
+			wasBalanced = rightLen >= balanceThreshold
+			pdqsortLessFunc(data, mid+1, b, limit, less)
+			b = mid
+		}
+	}
+}
+
+// partitionLessFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && less(data[i], data[a]) {
+		i++
+	}
+	for i <= j && !less(data[j], data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && less(data[i], data[a]) {
+			i++
+		}
+		for i <= j && !less(data[j], data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !less(data[a], data[i]) {
+			i++
+		}
+		for i <= j && less(data[a], data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !less(data[i], data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !less(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !less(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+	const (
+		shortestNinther = 50
+		maxSwaps        = 4 * 3
+	)
+
+	l := b - a
+
+	var (
+		swaps int
+		i     = a + l/4*1
+		j     = a + l/4*2
+		k     = a + l/4*3
+	)
+
+	if l >= 8 {
+		if l >= shortestNinther {
+			// Tukey ninther method, the idea came from Rust's implementation.
+			i = medianAdjacentLessFunc(data, i, &swaps, less)
+			j = medianAdjacentLessFunc(data, j, &swaps, less)
+			k = medianAdjacentLessFunc(data, k, &swaps, less)
+		}
+		// Find the median among i, j, k and stores it into j.
+		j = medianLessFunc(data, i, j, k, &swaps, less)
+	}
+
+	switch swaps {
+	case 0:
+		return j, increasingHint
+	case maxSwaps:
+		return j, decreasingHint
+	default:
+		return j, unknownHint
+	}
+}
+
+// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
+	if less(data[b], data[a]) {
+		*swaps++
+		return b, a
+	}
+	return a, b
+}
+
+// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
+	a, b = order2LessFunc(data, a, b, swaps, less)
+	b, c = order2LessFunc(data, b, c, swaps, less)
+	a, b = order2LessFunc(data, a, b, swaps, less)
+	return b
+}
+
+// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
+	return medianLessFunc(data, a-1, a, a+1, swaps, less)
+}
+
+func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+	i := a
+	j := b - 1
+	for i < j {
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+}
+
+func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+	for i := 0; i < n; i++ {
+		data[a+i], data[b+i] = data[b+i], data[a+i]
+	}
+}
+
+func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+	blockSize := 20 // must be > 0
+	a, b := 0, blockSize
+	for b <= n {
+		insertionSortLessFunc(data, a, b, less)
+		a = b
+		b += blockSize
+	}
+	insertionSortLessFunc(data, a, n, less)
+
+	for blockSize < n {
+		a, b = 0, 2*blockSize
+		for b <= n {
+			symMergeLessFunc(data, a, a+blockSize, b, less)
+			a = b
+			b += 2 * blockSize
+		}
+		if m := a + blockSize; m < n {
+			symMergeLessFunc(data, a, m, n, less)
+		}
+		blockSize *= 2
+	}
+}
+
+// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+	// Avoid unnecessary recursions of symMerge
+	// by direct insertion of data[a] into data[m:b]
+	// if data[a:m] only contains one element.
+	if m-a == 1 {
+		// Use binary search to find the lowest index i
+		// such that data[i] >= data[a] for m <= i < b.
+		// Exit the search loop with i == b in case no such index exists.
+		i := m
+		j := b
+		for i < j {
+			h := int(uint(i+j) >> 1)
+			if less(data[h], data[a]) {
+				i = h + 1
+			} else {
+				j = h
+			}
+		}
+		// Swap values until data[a] reaches the position before i.
+		for k := a; k < i-1; k++ {
+			data[k], data[k+1] = data[k+1], data[k]
+		}
+		return
+	}
+
+	// Avoid unnecessary recursions of symMerge
+	// by direct insertion of data[m] into data[a:m]
+	// if data[m:b] only contains one element.
+	if b-m == 1 {
+		// Use binary search to find the lowest index i
+		// such that data[i] > data[m] for a <= i < m.
+		// Exit the search loop with i == m in case no such index exists.
+		i := a
+		j := m
+		for i < j {
+			h := int(uint(i+j) >> 1)
+			if !less(data[m], data[h]) {
+				i = h + 1
+			} else {
+				j = h
+			}
+		}
+		// Swap values until data[m] reaches the position i.
+		for k := m; k > i; k-- {
+			data[k], data[k-1] = data[k-1], data[k]
+		}
+		return
+	}
+
+	mid := int(uint(a+b) >> 1)
+	n := mid + m
+	var start, r int
+	if m > mid {
+		start = n - b
+		r = mid
+	} else {
+		start = a
+		r = m
+	}
+	p := n - 1
+
+	for start < r {
+		c := int(uint(start+r) >> 1)
+		if !less(data[p-c], data[c]) {
+			start = c + 1
+		} else {
+			r = c
+		}
+	}
+
+	end := n - start
+	if start < m && m < end {
+		rotateLessFunc(data, start, m, end, less)
+	}
+	if a < start && start < mid {
+		symMergeLessFunc(data, a, start, mid, less)
+	}
+	if mid < end && end < b {
+		symMergeLessFunc(data, mid, end, b, less)
+	}
+}
+
+// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+	i := m - a
+	j := b - m
+
+	for i != j {
+		if i > j {
+			swapRangeLessFunc(data, m-i, m, j, less)
+			i -= j
+		} else {
+			swapRangeLessFunc(data, m-i, m+j-i, i, less)
+			j -= i
+		}
+	}
+	// i == j
+	swapRangeLessFunc(data, m-i, m, i, less)
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 00000000..efaa1c8b
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+	for i := a + 1; i < b; i++ {
+		for j := i; j > a && (data[j] < data[j-1]); j-- {
+			data[j], data[j-1] = data[j-1], data[j]
+		}
+	}
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+	root := lo
+	for {
+		child := 2*root + 1
+		if child >= hi {
+			break
+		}
+		if child+1 < hi && (data[first+child] < data[first+child+1]) {
+			child++
+		}
+		if !(data[first+root] < data[first+child]) {
+			return
+		}
+		data[first+root], data[first+child] = data[first+child], data[first+root]
+		root = child
+	}
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+	first := a
+	lo := 0
+	hi := b - a
+
+	// Build heap with greatest element at top.
+	for i := (hi - 1) / 2; i >= 0; i-- {
+		siftDownOrdered(data, i, hi, first)
+	}
+
+	// Pop elements, largest first, into end of data.
+	for i := hi - 1; i >= 0; i-- {
+		data[first], data[first+i] = data[first+i], data[first]
+		siftDownOrdered(data, lo, i, first)
+	}
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+	const maxInsertion = 12
+
+	var (
+		wasBalanced    = true // whether the last partitioning was reasonably balanced
+		wasPartitioned = true // whether the slice was already partitioned
+	)
+
+	for {
+		length := b - a
+
+		if length <= maxInsertion {
+			insertionSortOrdered(data, a, b)
+			return
+		}
+
+		// Fall back to heapsort if too many bad choices were made.
+		if limit == 0 {
+			heapSortOrdered(data, a, b)
+			return
+		}
+
+		// If the last partitioning was imbalanced, we need to break patterns.
+		if !wasBalanced {
+			breakPatternsOrdered(data, a, b)
+			limit--
+		}
+
+		pivot, hint := choosePivotOrdered(data, a, b)
+		if hint == decreasingHint {
+			reverseRangeOrdered(data, a, b)
+			// The chosen pivot was pivot-a elements after the start of the array.
+			// After reversing it is pivot-a elements before the end of the array.
+			// The idea came from Rust's implementation.
+			pivot = (b - 1) - (pivot - a)
+			hint = increasingHint
+		}
+
+		// The slice is likely already sorted.
+		if wasBalanced && wasPartitioned && hint == increasingHint {
+			if partialInsertionSortOrdered(data, a, b) {
+				return
+			}
+		}
+
+		// Probably the slice contains many duplicate elements, partition the slice into
+		// elements equal to and elements greater than the pivot.
+		if a > 0 && !(data[a-1] < data[pivot]) {
+			mid := partitionEqualOrdered(data, a, b, pivot)
+			a = mid
+			continue
+		}
+
+		mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+		wasPartitioned = alreadyPartitioned
+
+		leftLen, rightLen := mid-a, b-mid
+		balanceThreshold := length / 8
+		if leftLen < rightLen {
+			wasBalanced = leftLen >= balanceThreshold
+			pdqsortOrdered(data, a, mid, limit)
+			a = mid + 1
+		} else {
+			wasBalanced = rightLen >= balanceThreshold
+			pdqsortOrdered(data, mid+1, b, limit)
+			b = mid
+		}
+	}
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && (data[i] < data[a]) {
+		i++
+	}
+	for i <= j && !(data[j] < data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && (data[i] < data[a]) {
+			i++
+		}
+		for i <= j && !(data[j] < data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !(data[a] < data[i]) {
+			i++
+		}
+		for i <= j && (data[a] < data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !(data[i] < data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !(data[j] < data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !(data[j] < data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+	const (
+		shortestNinther = 50
+		maxSwaps        = 4 * 3
+	)
+
+	l := b - a
+
+	var (
+		swaps int
+		i     = a + l/4*1
+		j     = a + l/4*2
+		k     = a + l/4*3
+	)
+
+	if l >= 8 {
+		if l >= shortestNinther {
+			// Tukey ninther method, the idea came from Rust's implementation.
+			i = medianAdjacentOrdered(data, i, &swaps)
+			j = medianAdjacentOrdered(data, j, &swaps)
+			k = medianAdjacentOrdered(data, k, &swaps)
+		}
+		// Find the median among i, j, k and stores it into j.
+		j = medianOrdered(data, i, j, k, &swaps)
+	}
+
+	switch swaps {
+	case 0:
+		return j, increasingHint
+	case maxSwaps:
+		return j, decreasingHint
+	default:
+		return j, unknownHint
+	}
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+	if data[b] < data[a] {
+		*swaps++
+		return b, a
+	}
+	return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+	a, b = order2Ordered(data, a, b, swaps)
+	b, c = order2Ordered(data, b, c, swaps)
+	a, b = order2Ordered(data, a, b, swaps)
+	return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+	return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+	i := a
+	j := b - 1
+	for i < j {
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+	for i := 0; i < n; i++ {
+		data[a+i], data[b+i] = data[b+i], data[a+i]
+	}
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+	blockSize := 20 // must be > 0
+	a, b := 0, blockSize
+	for b <= n {
+		insertionSortOrdered(data, a, b)
+		a = b
+		b += blockSize
+	}
+	insertionSortOrdered(data, a, n)
+
+	for blockSize < n {
+		a, b = 0, 2*blockSize
+		for b <= n {
+			symMergeOrdered(data, a, a+blockSize, b)
+			a = b
+			b += 2 * blockSize
+		}
+		if m := a + blockSize; m < n {
+			symMergeOrdered(data, a, m, n)
+		}
+		blockSize *= 2
+	}
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+	// Avoid unnecessary recursions of symMerge
+	// by direct insertion of data[a] into data[m:b]
+	// if data[a:m] only contains one element.
+	if m-a == 1 {
+		// Use binary search to find the lowest index i
+		// such that data[i] >= data[a] for m <= i < b.
+		// Exit the search loop with i == b in case no such index exists.
+		i := m
+		j := b
+		for i < j {
+			h := int(uint(i+j) >> 1)
+			if data[h] < data[a] {
+				i = h + 1
+			} else {
+				j = h
+			}
+		}
+		// Swap values until data[a] reaches the position before i.
+		for k := a; k < i-1; k++ {
+			data[k], data[k+1] = data[k+1], data[k]
+		}
+		return
+	}
+
+	// Avoid unnecessary recursions of symMerge
+	// by direct insertion of data[m] into data[a:m]
+	// if data[m:b] only contains one element.
+	if b-m == 1 {
+		// Use binary search to find the lowest index i
+		// such that data[i] > data[m] for a <= i < m.
+		// Exit the search loop with i == m in case no such index exists.
+		i := a
+		j := m
+		for i < j {
+			h := int(uint(i+j) >> 1)
+			if !(data[m] < data[h]) {
+				i = h + 1
+			} else {
+				j = h
+			}
+		}
+		// Swap values until data[m] reaches the position i.
+		for k := m; k > i; k-- {
+			data[k], data[k-1] = data[k-1], data[k]
+		}
+		return
+	}
+
+	mid := int(uint(a+b) >> 1)
+	n := mid + m
+	var start, r int
+	if m > mid {
+		start = n - b
+		r = mid
+	} else {
+		start = a
+		r = m
+	}
+	p := n - 1
+
+	for start < r {
+		c := int(uint(start+r) >> 1)
+		if !(data[p-c] < data[c]) {
+			start = c + 1
+		} else {
+			r = c
+		}
+	}
+
+	end := n - start
+	if start < m && m < end {
+		rotateOrdered(data, start, m, end)
+	}
+	if a < start && start < mid {
+		symMergeOrdered(data, a, start, mid)
+	}
+	if mid < end && end < b {
+		symMergeOrdered(data, mid, end, b)
+	}
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+	i := m - a
+	j := b - m
+
+	for i != j {
+		if i > j {
+			swapRangeOrdered(data, m-i, m, j)
+			i -= j
+		} else {
+			swapRangeOrdered(data, m-i, m+j-i, i)
+			j -= i
+		}
+	}
+	// i == j
+	swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go
index 1c51b0ec..7ce8dd40 100644
--- a/vendor/golang.org/x/sys/unix/ioctl.go
+++ b/vendor/golang.org/x/sys/unix/ioctl.go
@@ -8,7 +8,6 @@
 package unix
 
 import (
-	"runtime"
 	"unsafe"
 )
 
@@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error {
 // passing the integer value directly.
 func IoctlSetPointerInt(fd int, req uint, value int) error {
 	v := int32(value)
-	return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
+	return ioctlPtr(fd, req, unsafe.Pointer(&v))
 }
 
 // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
@@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error {
 func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
 	// TODO: if we get the chance, remove the req parameter and
 	// hardcode TIOCSWINSZ.
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-	runtime.KeepAlive(value)
-	return err
+	return ioctlPtr(fd, req, unsafe.Pointer(value))
 }
 
 // IoctlSetTermios performs an ioctl on fd with a *Termios.
@@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
 // The req value will usually be TCSETA or TIOCSETA.
 func IoctlSetTermios(fd int, req uint, value *Termios) error {
 	// TODO: if we get the chance, remove the req parameter.
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-	runtime.KeepAlive(value)
-	return err
+	return ioctlPtr(fd, req, unsafe.Pointer(value))
 }
 
 // IoctlGetInt performs an ioctl operation which gets an integer value
@@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
 // for those, IoctlRetInt should be used instead of this function.
 func IoctlGetInt(fd int, req uint) (int, error) {
 	var value int
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
 	return value, err
 }
 
 func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
 	var value Winsize
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
 	return &value, err
 }
 
 func IoctlGetTermios(fd int, req uint) (*Termios, error) {
 	var value Termios
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
 	return &value, err
 }
diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go
index 5384e7d9..6532f09a 100644
--- a/vendor/golang.org/x/sys/unix/ioctl_zos.go
+++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go
@@ -27,9 +27,7 @@ func IoctlSetInt(fd int, req uint, value int) error {
 func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
 	// TODO: if we get the chance, remove the req parameter and
 	// hardcode TIOCSWINSZ.
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-	runtime.KeepAlive(value)
-	return err
+	return ioctlPtr(fd, req, unsafe.Pointer(value))
 }
 
 // IoctlSetTermios performs an ioctl on fd with a *Termios.
@@ -51,13 +49,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
 // for those, IoctlRetInt should be used instead of this function.
 func IoctlGetInt(fd int, req uint) (int, error) {
 	var value int
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
 	return value, err
 }
 
 func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
 	var value Winsize
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
 	return &value, err
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go
index 463c3eff..39dba6ca 100644
--- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go
+++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go
@@ -7,6 +7,12 @@
 
 package unix
 
+import "unsafe"
+
 func ptrace(request int, pid int, addr uintptr, data uintptr) error {
 	return ptrace1(request, pid, addr, data)
 }
+
+func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error {
+	return ptrace1Ptr(request, pid, addr, data)
+}
diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go
index ed0509a0..9ea66330 100644
--- a/vendor/golang.org/x/sys/unix/ptrace_ios.go
+++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go
@@ -7,6 +7,12 @@
 
 package unix
 
+import "unsafe"
+
 func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
 	return ENOTSUP
 }
+
+func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
+	return ENOTSUP
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index 2db1b51e..d9f5544c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 				break
 			}
 		}
-
-		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-		sa.Name = string(bytes)
+		sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
 		return sa, nil
 
 	case AF_INET:
@@ -411,6 +409,7 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 }
 func (w WaitStatus) TrapCause() int { return -1 }
 
 //sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl
 
 // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX
 // There is no way to create a custom fcntl and to keep //sys fcntl easily,
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index eda42671..7705c327 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 				break
 			}
 		}
-		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-		sa.Name = string(bytes)
+		sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
 		return sa, nil
 
 	case AF_INET:
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 192b071b..7064d6eb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -14,7 +14,6 @@ package unix
 
 import (
 	"fmt"
-	"runtime"
 	"syscall"
 	"unsafe"
 )
@@ -376,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) {
 func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) }
 
 //sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
 
 func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error {
-	err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo)))
-	runtime.KeepAlive(ctlInfo)
-	return err
+	return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo))
 }
 
 // IfreqMTU is struct ifreq used to get or set a network device's MTU.
@@ -394,16 +392,14 @@ type IfreqMTU struct {
 func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) {
 	var ifreq IfreqMTU
 	copy(ifreq.Name[:], ifname)
-	err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq)))
+	err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq))
 	return &ifreq, err
 }
 
 // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU
 // of the network device specified by ifreq.Name.
 func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error {
-	err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq)))
-	runtime.KeepAlive(ifreq)
-	return err
+	return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq))
 }
 
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
index b37310ce..9fa87980 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
 //sys	getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64
 //sys	Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
 //sys	ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
+//sys	ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
 //sys	Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
 //sys	Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
index d51ec996..f17b8c52 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
 //sys	getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT
 //sys	Lstat(path string, stat *Stat_t) (err error)
 //sys	ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
+//sys	ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
 //sys	Stat(path string, stat *Stat_t) (err error)
 //sys	Statfs(path string, stat *Statfs_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index a41111a7..221efc26 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
 }
 
 //sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
 
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
 
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index d50b9dc2..5bdde03e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -161,7 +161,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
 	return
 }
 
-//sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
 
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
 
@@ -253,6 +254,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 //sys	ptrace(request int, pid int, addr uintptr, data int) (err error)
+//sys	ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE
 
 func PtraceAttach(pid int) (err error) {
 	return ptrace(PT_ATTACH, pid, 0, 0)
@@ -267,19 +269,36 @@ func PtraceDetach(pid int) (err error) {
 }
 
 func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) {
-	return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0)
+	return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0)
 }
 
 func PtraceGetRegs(pid int, regsout *Reg) (err error) {
-	return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0)
+	return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0)
+}
+
+func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
+	ioDesc := PtraceIoDesc{
+		Op:   int32(req),
+		Offs: offs,
+	}
+	if countin > 0 {
+		_ = out[:countin] // check bounds
+		ioDesc.Addr = &out[0]
+	} else if out != nil {
+		ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero))
+	}
+	ioDesc.SetLen(countin)
+
+	err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0)
+	return int(ioDesc.Len), err
 }
 
 func PtraceLwpEvents(pid int, enable int) (err error) {
 	return ptrace(PT_LWP_EVENTS, pid, 0, enable)
 }
 
-func PtraceLwpInfo(pid int, info uintptr) (err error) {
-	return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{})))
+func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) {
+	return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info)))
 }
 
 func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
@@ -299,13 +318,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
 }
 
 func PtraceSetRegs(pid int, regs *Reg) (err error) {
-	return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0)
+	return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0)
 }
 
 func PtraceSingleStep(pid int) (err error) {
 	return ptrace(PT_STEP, pid, 1, 0)
 }
 
+func Dup3(oldfd, newfd, flags int) error {
+	if oldfd == newfd || flags&^O_CLOEXEC != 0 {
+		return EINVAL
+	}
+	how := F_DUP2FD
+	if flags&O_CLOEXEC != 0 {
+		how = F_DUP2FD_CLOEXEC
+	}
+	_, err := fcntl(oldfd, how, newfd)
+	return err
+}
+
 /*
  * Exposed directly
  */
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
index 6a91d471..b8da5100 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
@@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
 
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint32(length)
+}
+
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	var writtenOut uint64 = 0
 	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
@@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
 
 func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
-	return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
-}
-
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint32(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
+	return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
index 48110a0a..47155c48 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
@@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
 
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint64(length)
+}
+
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	var writtenOut uint64 = 0
 	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
@@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
 
 func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
-	return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
-}
-
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint64(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
+	return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
index 52f1d4b7..08932093 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
@@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
 
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint32(length)
+}
+
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	var writtenOut uint64 = 0
 	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
@@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint32(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
index 5537ee4f..d151a0d0 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
@@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
 
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint64(length)
+}
+
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	var writtenOut uint64 = 0
 	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
@@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint64(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
index 164abd5d..d5cd64b3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
@@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
 
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint64(length)
+}
+
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	var writtenOut uint64 = 0
 	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
@@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint64(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
index 4ffb6480..381fd467 100644
--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -20,3 +20,11 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	}
 	return
 }
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg)))
+	if r0 == -1 && er != nil {
+		err = er
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 5443dddd..97353315 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1015,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		for n < len(pp.Path) && pp.Path[n] != 0 {
 			n++
 		}
-		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-		sa.Name = string(bytes)
+		sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
 		return sa, nil
 
 	case AF_INET:
@@ -1365,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) {
 	return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o)))
 }
 
+func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error {
+	return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s))
+}
+
 // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html)
 
 // KeyctlInt calls keyctl commands in which each argument is an int.
@@ -1579,6 +1582,7 @@ func BindToDevice(fd int, device string) (err error) {
 }
 
 //sys	ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+//sys	ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE
 
 func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
 	// The peek requests are machine-size oriented, so we wrap it
@@ -1596,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro
 	// boundary.
 	n := 0
 	if addr%SizeofPtr != 0 {
-		err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0]))
 		if err != nil {
 			return 0, err
 		}
@@ -1608,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro
 	for len(out) > 0 {
 		// We use an internal buffer to guarantee alignment.
 		// It's not documented if this is necessary, but we're paranoid.
-		err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0]))
 		if err != nil {
 			return n, err
 		}
@@ -1640,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c
 	n := 0
 	if addr%SizeofPtr != 0 {
 		var buf [SizeofPtr]byte
-		err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0]))
 		if err != nil {
 			return 0, err
 		}
@@ -1667,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c
 	// Trailing edge.
 	if len(data) > 0 {
 		var buf [SizeofPtr]byte
-		err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0]))
 		if err != nil {
 			return n, err
 		}
@@ -1696,11 +1700,11 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) {
 }
 
 func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
 
 func PtraceSetOptions(pid int, options int) (err error) {
@@ -1709,7 +1713,7 @@ func PtraceSetOptions(pid int, options int) (err error) {
 
 func PtraceGetEventMsg(pid int) (msg uint, err error) {
 	var data _C_long
-	err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
+	err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data))
 	msg = uint(data)
 	return
 }
@@ -2154,6 +2158,14 @@ func isGroupMember(gid int) bool {
 	return false
 }
 
+func isCapDacOverrideSet() bool {
+	hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3}
+	data := [2]CapUserData{}
+	err := Capget(&hdr, &data[0])
+
+	return err == nil && data[0].Effective&(1<<CAP_DAC_OVERRIDE) != 0
+}
+
 //sys	faccessat(dirfd int, path string, mode uint32) (err error)
 //sys	Faccessat2(dirfd int, path string, mode uint32, flags int) (err error)
 
@@ -2189,6 +2201,12 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
 	var uid int
 	if flags&AT_EACCESS != 0 {
 		uid = Geteuid()
+		if uid != 0 && isCapDacOverrideSet() {
+			// If CAP_DAC_OVERRIDE is set, file access check is
+			// done by the kernel in the same way as for root
+			// (see generic_permission() in the Linux sources).
+			uid = 0
+		}
 	} else {
 		uid = Getuid()
 	}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 35a3ad75..e66865dc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -13,7 +13,6 @@
 package unix
 
 import (
-	"runtime"
 	"syscall"
 	"unsafe"
 )
@@ -178,13 +177,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 //sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
 
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
 
 func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) {
 	var value Ptmget
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	runtime.KeepAlive(value)
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
 	return &value, err
 }
 
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 9b67b908..5e9de23a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -152,6 +152,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
 }
 
 //sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
 
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
 
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 07ac5610..d3444b64 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -408,8 +408,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		for n < len(pp.Path) && pp.Path[n] != 0 {
 			n++
 		}
-		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-		sa.Name = string(bytes)
+		sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
 		return sa, nil
 
 	case AF_INET:
@@ -547,21 +546,25 @@ func Minor(dev uint64) uint32 {
  */
 
 //sys	ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl
+//sys	ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) = libc.ioctl
 
 func ioctl(fd int, req uint, arg uintptr) (err error) {
 	_, err = ioctlRet(fd, req, arg)
 	return err
 }
 
-func IoctlSetTermio(fd int, req uint, value *Termio) error {
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-	runtime.KeepAlive(value)
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, err = ioctlPtrRet(fd, req, arg)
 	return err
 }
 
+func IoctlSetTermio(fd int, req uint, value *Termio) error {
+	return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
 func IoctlGetTermio(fd int, req uint) (*Termio, error) {
 	var value Termio
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
 	return &value, err
 }
 
@@ -1084,7 +1087,7 @@ func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) {
 func IoctlSetString(fd int, req uint, val string) error {
 	bs := make([]byte, len(val)+1)
 	copy(bs[:len(bs)-1], val)
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0])))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0]))
 	runtime.KeepAlive(&bs[0])
 	return err
 }
@@ -1118,7 +1121,7 @@ func (l *Lifreq) GetLifruUint() uint {
 }
 
 func IoctlLifreq(fd int, req uint, l *Lifreq) error {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(l)))
+	return ioctlPtr(fd, req, unsafe.Pointer(l))
 }
 
 // Strioctl Helpers
@@ -1129,5 +1132,5 @@ func (s *Strioctl) SetInt(i int) {
 }
 
 func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) {
-	return ioctlRet(fd, req, uintptr(unsafe.Pointer(s)))
+	return ioctlPtrRet(fd, req, unsafe.Pointer(s))
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 68b2f3e1..b295497a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -139,8 +139,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		for n < int(pp.Len) && pp.Path[n] != 0 {
 			n++
 		}
-		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-		sa.Name = string(bytes)
+		sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
 		return sa, nil
 
 	case AF_INET:
@@ -214,6 +213,7 @@ func (cmsg *Cmsghdr) SetLen(length int) {
 //sys   mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP
 //sys   munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP
 //sys   ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL
+//sys   ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
 
 //sys   Access(path string, mode uint32) (err error) = SYS___ACCESS_A
 //sys   Chdir(path string) (err error) = SYS___CHDIR_A
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index e174685a..398c37e5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -70,6 +70,7 @@ const (
 	ALG_SET_DRBG_ENTROPY                        = 0x6
 	ALG_SET_IV                                  = 0x2
 	ALG_SET_KEY                                 = 0x1
+	ALG_SET_KEY_BY_KEY_SERIAL                   = 0x7
 	ALG_SET_OP                                  = 0x3
 	ANON_INODE_FS_MAGIC                         = 0x9041934
 	ARPHRD_6LOWPAN                              = 0x339
@@ -774,6 +775,8 @@ const (
 	DEVLINK_GENL_MCGRP_CONFIG_NAME              = "config"
 	DEVLINK_GENL_NAME                           = "devlink"
 	DEVLINK_GENL_VERSION                        = 0x1
+	DEVLINK_PORT_FN_CAP_MIGRATABLE              = 0x2
+	DEVLINK_PORT_FN_CAP_ROCE                    = 0x1
 	DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX           = 0x14
 	DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS  = 0x3
 	DEVMEM_MAGIC                                = 0x454d444d
@@ -1262,6 +1265,8 @@ const (
 	FSCRYPT_MODE_AES_256_CTS                    = 0x4
 	FSCRYPT_MODE_AES_256_HCTR2                  = 0xa
 	FSCRYPT_MODE_AES_256_XTS                    = 0x1
+	FSCRYPT_MODE_SM4_CTS                        = 0x8
+	FSCRYPT_MODE_SM4_XTS                        = 0x7
 	FSCRYPT_POLICY_FLAGS_PAD_16                 = 0x2
 	FSCRYPT_POLICY_FLAGS_PAD_32                 = 0x3
 	FSCRYPT_POLICY_FLAGS_PAD_4                  = 0x0
@@ -1280,8 +1285,6 @@ const (
 	FS_ENCRYPTION_MODE_AES_256_GCM              = 0x2
 	FS_ENCRYPTION_MODE_AES_256_XTS              = 0x1
 	FS_ENCRYPTION_MODE_INVALID                  = 0x0
-	FS_ENCRYPTION_MODE_SPECK128_256_CTS         = 0x8
-	FS_ENCRYPTION_MODE_SPECK128_256_XTS         = 0x7
 	FS_IOC_ADD_ENCRYPTION_KEY                   = 0xc0506617
 	FS_IOC_GET_ENCRYPTION_KEY_STATUS            = 0xc080661a
 	FS_IOC_GET_ENCRYPTION_POLICY_EX             = 0xc0096616
@@ -1770,6 +1773,7 @@ const (
 	LANDLOCK_ACCESS_FS_REFER                    = 0x2000
 	LANDLOCK_ACCESS_FS_REMOVE_DIR               = 0x10
 	LANDLOCK_ACCESS_FS_REMOVE_FILE              = 0x20
+	LANDLOCK_ACCESS_FS_TRUNCATE                 = 0x4000
 	LANDLOCK_ACCESS_FS_WRITE_FILE               = 0x2
 	LANDLOCK_CREATE_RULESET_VERSION             = 0x1
 	LINUX_REBOOT_CMD_CAD_OFF                    = 0x0
@@ -1809,6 +1813,7 @@ const (
 	LWTUNNEL_IP_OPT_GENEVE_MAX                  = 0x3
 	LWTUNNEL_IP_OPT_VXLAN_MAX                   = 0x1
 	MADV_COLD                                   = 0x14
+	MADV_COLLAPSE                               = 0x19
 	MADV_DODUMP                                 = 0x11
 	MADV_DOFORK                                 = 0xb
 	MADV_DONTDUMP                               = 0x10
@@ -2163,6 +2168,7 @@ const (
 	PACKET_FANOUT_DATA                          = 0x16
 	PACKET_FANOUT_EBPF                          = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG                   = 0x8000
+	PACKET_FANOUT_FLAG_IGNORE_OUTGOING          = 0x4000
 	PACKET_FANOUT_FLAG_ROLLOVER                 = 0x1000
 	PACKET_FANOUT_FLAG_UNIQUEID                 = 0x2000
 	PACKET_FANOUT_HASH                          = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
index bd001a6e..97f20ca2 100644
--- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
@@ -15,12 +15,12 @@ type PtraceRegsArm struct {
 
 // PtraceGetRegsArm fetches the registers used by arm binaries.
 func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegsArm sets the registers used by arm binaries.
 func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
 
 // PtraceRegsArm64 is the registers used by arm64 binaries.
@@ -33,10 +33,10 @@ type PtraceRegsArm64 struct {
 
 // PtraceGetRegsArm64 fetches the registers used by arm64 binaries.
 func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegsArm64 sets the registers used by arm64 binaries.
 func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
diff --git a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go
index 6cb6d688..834d2856 100644
--- a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go
@@ -7,11 +7,11 @@ import "unsafe"
 // PtraceGetRegSetArm64 fetches the registers used by arm64 binaries.
 func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error {
 	iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))}
-	return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
+	return ptracePtr(PTRACE_GETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec))
 }
 
 // PtraceSetRegSetArm64 sets the registers used by arm64 binaries.
 func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error {
 	iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))}
-	return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
+	return ptracePtr(PTRACE_SETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec))
 }
diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
index c34d0639..0b5f7943 100644
--- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
@@ -21,12 +21,12 @@ type PtraceRegsMips struct {
 
 // PtraceGetRegsMips fetches the registers used by mips binaries.
 func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegsMips sets the registers used by mips binaries.
 func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
 
 // PtraceRegsMips64 is the registers used by mips64 binaries.
@@ -42,10 +42,10 @@ type PtraceRegsMips64 struct {
 
 // PtraceGetRegsMips64 fetches the registers used by mips64 binaries.
 func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegsMips64 sets the registers used by mips64 binaries.
 func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
index 3ccf0c0c..2807f7e6 100644
--- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
@@ -21,12 +21,12 @@ type PtraceRegsMipsle struct {
 
 // PtraceGetRegsMipsle fetches the registers used by mipsle binaries.
 func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegsMipsle sets the registers used by mipsle binaries.
 func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
 
 // PtraceRegsMips64le is the registers used by mips64le binaries.
@@ -42,10 +42,10 @@ type PtraceRegsMips64le struct {
 
 // PtraceGetRegsMips64le fetches the registers used by mips64le binaries.
 func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegsMips64le sets the registers used by mips64le binaries.
 func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
index 7d658570..281ea64e 100644
--- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
@@ -31,12 +31,12 @@ type PtraceRegs386 struct {
 
 // PtraceGetRegs386 fetches the registers used by 386 binaries.
 func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegs386 sets the registers used by 386 binaries.
 func PtraceSetRegs386(pid int, regs *PtraceRegs386) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
 
 // PtraceRegsAmd64 is the registers used by amd64 binaries.
@@ -72,10 +72,10 @@ type PtraceRegsAmd64 struct {
 
 // PtraceGetRegsAmd64 fetches the registers used by amd64 binaries.
 func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
 }
 
 // PtraceSetRegsAmd64 sets the registers used by amd64 binaries.
 func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
 }
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
index 870215d2..ef9dcd1b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
@@ -223,6 +223,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg)))
+	if r0 == -1 && er != nil {
+		err = er
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) {
 	r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg))
 	r = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
index a89b0bfa..f86a9459 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
@@ -103,6 +103,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, e1 := callioctl_ptr(fd, int(req), arg)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) {
 	r0, e1 := callfcntl(fd, cmd, uintptr(arg))
 	r = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
index 2caa5adf..d32a84ca 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
@@ -423,6 +423,13 @@ func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func callioctl_ptr(fd int, req int, arg unsafe.Pointer) (r1 uintptr, e1 Errno) {
+	r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_ioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) {
 	r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, fd, uintptr(cmd), arg, 0, 0, 0)
 	return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
index 944a714b..d7d8baf8 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
@@ -191,6 +191,14 @@ func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func callioctl_ptr(fd int, req int, arg unsafe.Pointer) (r1 uintptr, e1 Errno) {
+	r1 = uintptr(C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg))))
+	e1 = syscall.GetErrno()
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) {
 	r1 = uintptr(C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)))
 	e1 = syscall.GetErrno()
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index c2461c49..a29ffdd5 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -725,6 +725,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
@@ -2502,6 +2510,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
 	return
 }
 
+func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ptrace_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 26a0fdc5..2fd4590b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -725,6 +725,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
@@ -2502,6 +2510,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
 	return
 }
 
+func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ptrace_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
index 54749f9c..3b851347 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
@@ -436,6 +436,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
index 77479d45..11290656 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
@@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
@@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
index 2e966d4d..55f5abfe 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
@@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
@@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
index d65a7c0f..d39651c2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
@@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
@@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
index 6f0b97c6..ddb74086 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
@@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
@@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
index e1c23b52..09a53a61 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
@@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
@@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 36ea3a55..430cb24d 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -379,6 +379,16 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(arg)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
index 79f73899..8e1d9c8f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
@@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
index fb161f3a..21c69504 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
@@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
index 4c8ac993..298168f9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
@@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
index 76dd8ec4..68b8bd49 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
@@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
 	var _p0 unsafe.Pointer
 	if len(mib) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index caeb807b..0b0f910e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index a05e5f4f..48ff5de7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index b2da8e50..2452a641 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 048b2655..5e35600a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index 6f33e37e..b04cef1a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index 330cf7f7..47a07ee0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 5f24de0d..573378fd 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 var libc_ioctl_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 78d4a424..4873a1e5 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -657,6 +657,17 @@ func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
+	ret = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0)
 	n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
index f2079457..07bfe2ef 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
@@ -267,6 +267,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index d9c78cdc..29dc4833 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -362,7 +362,7 @@ type FpExtendedPrecision struct{}
 type PtraceIoDesc struct {
 	Op   int32
 	Offs uintptr
-	Addr uintptr
+	Addr *byte
 	Len  uint32
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index 26991b16..0a89b289 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -367,7 +367,7 @@ type FpExtendedPrecision struct{}
 type PtraceIoDesc struct {
 	Op   int32
 	Offs uintptr
-	Addr uintptr
+	Addr *byte
 	Len  uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index f8324e7e..c8666bb1 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -350,7 +350,7 @@ type FpExtendedPrecision struct {
 type PtraceIoDesc struct {
 	Op   int32
 	Offs uintptr
-	Addr uintptr
+	Addr *byte
 	Len  uint32
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
index 4220411f..88fb48a8 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
@@ -347,7 +347,7 @@ type FpExtendedPrecision struct{}
 type PtraceIoDesc struct {
 	Op   int32
 	Offs uintptr
-	Addr uintptr
+	Addr *byte
 	Len  uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
index 0660fd45..698dc975 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
@@ -348,7 +348,7 @@ type FpExtendedPrecision struct{}
 type PtraceIoDesc struct {
 	Op   int32
 	Offs uintptr
-	Addr uintptr
+	Addr *byte
 	Len  uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 7d9fc8f1..ca84727c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -456,36 +456,60 @@ type Ucred struct {
 }
 
 type TCPInfo struct {
-	State          uint8
-	Ca_state       uint8
-	Retransmits    uint8
-	Probes         uint8
-	Backoff        uint8
-	Options        uint8
-	Rto            uint32
-	Ato            uint32
-	Snd_mss        uint32
-	Rcv_mss        uint32
-	Unacked        uint32
-	Sacked         uint32
-	Lost           uint32
-	Retrans        uint32
-	Fackets        uint32
-	Last_data_sent uint32
-	Last_ack_sent  uint32
-	Last_data_recv uint32
-	Last_ack_recv  uint32
-	Pmtu           uint32
-	Rcv_ssthresh   uint32
-	Rtt            uint32
-	Rttvar         uint32
-	Snd_ssthresh   uint32
-	Snd_cwnd       uint32
-	Advmss         uint32
-	Reordering     uint32
-	Rcv_rtt        uint32
-	Rcv_space      uint32
-	Total_retrans  uint32
+	State           uint8
+	Ca_state        uint8
+	Retransmits     uint8
+	Probes          uint8
+	Backoff         uint8
+	Options         uint8
+	Rto             uint32
+	Ato             uint32
+	Snd_mss         uint32
+	Rcv_mss         uint32
+	Unacked         uint32
+	Sacked          uint32
+	Lost            uint32
+	Retrans         uint32
+	Fackets         uint32
+	Last_data_sent  uint32
+	Last_ack_sent   uint32
+	Last_data_recv  uint32
+	Last_ack_recv   uint32
+	Pmtu            uint32
+	Rcv_ssthresh    uint32
+	Rtt             uint32
+	Rttvar          uint32
+	Snd_ssthresh    uint32
+	Snd_cwnd        uint32
+	Advmss          uint32
+	Reordering      uint32
+	Rcv_rtt         uint32
+	Rcv_space       uint32
+	Total_retrans   uint32
+	Pacing_rate     uint64
+	Max_pacing_rate uint64
+	Bytes_acked     uint64
+	Bytes_received  uint64
+	Segs_out        uint32
+	Segs_in         uint32
+	Notsent_bytes   uint32
+	Min_rtt         uint32
+	Data_segs_in    uint32
+	Data_segs_out   uint32
+	Delivery_rate   uint64
+	Busy_time       uint64
+	Rwnd_limited    uint64
+	Sndbuf_limited  uint64
+	Delivered       uint32
+	Delivered_ce    uint32
+	Bytes_sent      uint64
+	Bytes_retrans   uint64
+	Dsack_dups      uint32
+	Reord_seen      uint32
+	Rcv_ooopack     uint32
+	Snd_wnd         uint32
+	Rcv_wnd         uint32
+	Rehash          uint32
 }
 
 type CanFilter struct {
@@ -528,7 +552,7 @@ const (
 	SizeofIPv6MTUInfo       = 0x20
 	SizeofICMPv6Filter      = 0x20
 	SizeofUcred             = 0xc
-	SizeofTCPInfo           = 0x68
+	SizeofTCPInfo           = 0xf0
 	SizeofCanFilter         = 0x8
 	SizeofTCPRepairOpt      = 0x8
 )
@@ -1043,6 +1067,7 @@ const (
 	PerfBitCommExec                      = CBitFieldMaskBit24
 	PerfBitUseClockID                    = CBitFieldMaskBit25
 	PerfBitContextSwitch                 = CBitFieldMaskBit26
+	PerfBitWriteBackward                 = CBitFieldMaskBit27
 )
 
 const (
@@ -1239,7 +1264,7 @@ type TCPMD5Sig struct {
 	Flags     uint8
 	Prefixlen uint8
 	Keylen    uint16
-	_         uint32
+	Ifindex   int32
 	Key       [80]uint8
 }
 
@@ -1939,7 +1964,11 @@ const (
 	NFT_MSG_GETOBJ                    = 0x13
 	NFT_MSG_DELOBJ                    = 0x14
 	NFT_MSG_GETOBJ_RESET              = 0x15
-	NFT_MSG_MAX                       = 0x19
+	NFT_MSG_NEWFLOWTABLE              = 0x16
+	NFT_MSG_GETFLOWTABLE              = 0x17
+	NFT_MSG_DELFLOWTABLE              = 0x18
+	NFT_MSG_GETRULE_RESET             = 0x19
+	NFT_MSG_MAX                       = 0x1a
 	NFTA_LIST_UNSPEC                  = 0x0
 	NFTA_LIST_ELEM                    = 0x1
 	NFTA_HOOK_UNSPEC                  = 0x0
@@ -2443,9 +2472,11 @@ const (
 	SOF_TIMESTAMPING_OPT_STATS    = 0x1000
 	SOF_TIMESTAMPING_OPT_PKTINFO  = 0x2000
 	SOF_TIMESTAMPING_OPT_TX_SWHW  = 0x4000
+	SOF_TIMESTAMPING_BIND_PHC     = 0x8000
+	SOF_TIMESTAMPING_OPT_ID_TCP   = 0x10000
 
-	SOF_TIMESTAMPING_LAST = 0x8000
-	SOF_TIMESTAMPING_MASK = 0xffff
+	SOF_TIMESTAMPING_LAST = 0x10000
+	SOF_TIMESTAMPING_MASK = 0x1ffff
 
 	SCM_TSTAMP_SND   = 0x0
 	SCM_TSTAMP_SCHED = 0x1
@@ -3265,7 +3296,7 @@ const (
 	DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES              = 0xae
 	DEVLINK_ATTR_NESTED_DEVLINK                        = 0xaf
 	DEVLINK_ATTR_SELFTESTS                             = 0xb0
-	DEVLINK_ATTR_MAX                                   = 0xb0
+	DEVLINK_ATTR_MAX                                   = 0xb3
 	DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE              = 0x0
 	DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX           = 0x1
 	DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT               = 0x0
@@ -3281,7 +3312,8 @@ const (
 	DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR                 = 0x1
 	DEVLINK_PORT_FN_ATTR_STATE                         = 0x2
 	DEVLINK_PORT_FN_ATTR_OPSTATE                       = 0x3
-	DEVLINK_PORT_FUNCTION_ATTR_MAX                     = 0x3
+	DEVLINK_PORT_FN_ATTR_CAPS                          = 0x4
+	DEVLINK_PORT_FUNCTION_ATTR_MAX                     = 0x4
 )
 
 type FsverityDigest struct {
@@ -3572,7 +3604,8 @@ const (
 	ETHTOOL_MSG_MODULE_SET                    = 0x23
 	ETHTOOL_MSG_PSE_GET                       = 0x24
 	ETHTOOL_MSG_PSE_SET                       = 0x25
-	ETHTOOL_MSG_USER_MAX                      = 0x25
+	ETHTOOL_MSG_RSS_GET                       = 0x26
+	ETHTOOL_MSG_USER_MAX                      = 0x26
 	ETHTOOL_MSG_KERNEL_NONE                   = 0x0
 	ETHTOOL_MSG_STRSET_GET_REPLY              = 0x1
 	ETHTOOL_MSG_LINKINFO_GET_REPLY            = 0x2
@@ -3611,7 +3644,8 @@ const (
 	ETHTOOL_MSG_MODULE_GET_REPLY              = 0x23
 	ETHTOOL_MSG_MODULE_NTF                    = 0x24
 	ETHTOOL_MSG_PSE_GET_REPLY                 = 0x25
-	ETHTOOL_MSG_KERNEL_MAX                    = 0x25
+	ETHTOOL_MSG_RSS_GET_REPLY                 = 0x26
+	ETHTOOL_MSG_KERNEL_MAX                    = 0x26
 	ETHTOOL_A_HEADER_UNSPEC                   = 0x0
 	ETHTOOL_A_HEADER_DEV_INDEX                = 0x1
 	ETHTOOL_A_HEADER_DEV_NAME                 = 0x2
@@ -3679,7 +3713,8 @@ const (
 	ETHTOOL_A_LINKSTATE_SQI_MAX               = 0x4
 	ETHTOOL_A_LINKSTATE_EXT_STATE             = 0x5
 	ETHTOOL_A_LINKSTATE_EXT_SUBSTATE          = 0x6
-	ETHTOOL_A_LINKSTATE_MAX                   = 0x6
+	ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT          = 0x7
+	ETHTOOL_A_LINKSTATE_MAX                   = 0x7
 	ETHTOOL_A_DEBUG_UNSPEC                    = 0x0
 	ETHTOOL_A_DEBUG_HEADER                    = 0x1
 	ETHTOOL_A_DEBUG_MSGMASK                   = 0x2
@@ -4409,7 +4444,7 @@ const (
 	NL80211_ATTR_MAC_HINT                                   = 0xc8
 	NL80211_ATTR_MAC_MASK                                   = 0xd7
 	NL80211_ATTR_MAX_AP_ASSOC_STA                           = 0xca
-	NL80211_ATTR_MAX                                        = 0x140
+	NL80211_ATTR_MAX                                        = 0x141
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION                     = 0xb4
 	NL80211_ATTR_MAX_CSA_COUNTERS                           = 0xce
 	NL80211_ATTR_MAX_MATCH_SETS                             = 0x85
@@ -4552,6 +4587,7 @@ const (
 	NL80211_ATTR_SUPPORT_MESH_AUTH                          = 0x73
 	NL80211_ATTR_SURVEY_INFO                                = 0x54
 	NL80211_ATTR_SURVEY_RADIO_STATS                         = 0xda
+	NL80211_ATTR_TD_BITMAP                                  = 0x141
 	NL80211_ATTR_TDLS_ACTION                                = 0x88
 	NL80211_ATTR_TDLS_DIALOG_TOKEN                          = 0x89
 	NL80211_ATTR_TDLS_EXTERNAL_SETUP                        = 0x8c
@@ -5752,3 +5788,25 @@ const (
 	AUDIT_NLGRP_NONE    = 0x0
 	AUDIT_NLGRP_READLOG = 0x1
 )
+
+const (
+	TUN_F_CSUM    = 0x1
+	TUN_F_TSO4    = 0x2
+	TUN_F_TSO6    = 0x4
+	TUN_F_TSO_ECN = 0x8
+	TUN_F_UFO     = 0x10
+)
+
+const (
+	VIRTIO_NET_HDR_F_NEEDS_CSUM = 0x1
+	VIRTIO_NET_HDR_F_DATA_VALID = 0x2
+	VIRTIO_NET_HDR_F_RSC_INFO   = 0x4
+)
+
+const (
+	VIRTIO_NET_HDR_GSO_NONE  = 0x0
+	VIRTIO_NET_HDR_GSO_TCPV4 = 0x1
+	VIRTIO_NET_HDR_GSO_UDP   = 0x3
+	VIRTIO_NET_HDR_GSO_TCPV6 = 0x4
+	VIRTIO_NET_HDR_GSO_ECN   = 0x80
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 89c516a2..4ecc1495 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -414,7 +414,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [122]int8
+	Data   [122]byte
 	_      uint32
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 62b4fb26..34fddff9 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -427,7 +427,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]int8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index e86b3589..3b14a603 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -405,7 +405,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [122]uint8
+	Data   [122]byte
 	_      uint32
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 6c6be4c9..0517651a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -406,7 +406,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]int8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 4982ea35..3b0c5181 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -407,7 +407,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]int8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 173141a6..fccdf4dd 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -410,7 +410,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [122]int8
+	Data   [122]byte
 	_      uint32
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 93ae4c51..500de8fc 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -409,7 +409,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]int8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 4e4e510c..d0434cd2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -409,7 +409,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]int8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 3f5ba013..84206ba5 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -410,7 +410,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [122]int8
+	Data   [122]byte
 	_      uint32
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 71dfe7cd..ab078cf1 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -417,7 +417,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [122]uint8
+	Data   [122]byte
 	_      uint32
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 3a2b7f0a..42eb2c4c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -416,7 +416,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]uint8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index a52d6275..31304a4e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -416,7 +416,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]uint8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index dfc007d8..c311f961 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -434,7 +434,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]uint8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index b53cb910..bba3cefa 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -429,7 +429,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]int8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index fe0aa354..ad8a0138 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -411,7 +411,7 @@ const (
 
 type SockaddrStorage struct {
 	Family uint16
-	_      [118]int8
+	Data   [118]byte
 	_      uint64
 }
 
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 41cb3c01..3723b2c2 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -824,6 +824,9 @@ const socket_error = uintptr(^uint32(0))
 //sys	WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup
 //sys	WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup
 //sys	WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl
+//sys	WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW
+//sys	WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW
+//sys	WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd
 //sys	socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket
 //sys	sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto
 //sys	recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom
@@ -1019,8 +1022,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) {
 		for n < len(pp.Path) && pp.Path[n] != 0 {
 			n++
 		}
-		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-		sa.Name = string(bytes)
+		sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
 		return sa, nil
 
 	case AF_INET:
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 0c4add97..857acf10 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1243,6 +1243,51 @@ const (
 	DnsSectionAdditional = 0x0003
 )
 
+const (
+	// flags of WSALookupService
+	LUP_DEEP                = 0x0001
+	LUP_CONTAINERS          = 0x0002
+	LUP_NOCONTAINERS        = 0x0004
+	LUP_NEAREST             = 0x0008
+	LUP_RETURN_NAME         = 0x0010
+	LUP_RETURN_TYPE         = 0x0020
+	LUP_RETURN_VERSION      = 0x0040
+	LUP_RETURN_COMMENT      = 0x0080
+	LUP_RETURN_ADDR         = 0x0100
+	LUP_RETURN_BLOB         = 0x0200
+	LUP_RETURN_ALIASES      = 0x0400
+	LUP_RETURN_QUERY_STRING = 0x0800
+	LUP_RETURN_ALL          = 0x0FF0
+	LUP_RES_SERVICE         = 0x8000
+
+	LUP_FLUSHCACHE    = 0x1000
+	LUP_FLUSHPREVIOUS = 0x2000
+
+	LUP_NON_AUTHORITATIVE      = 0x4000
+	LUP_SECURE                 = 0x8000
+	LUP_RETURN_PREFERRED_NAMES = 0x10000
+	LUP_DNS_ONLY               = 0x20000
+
+	LUP_ADDRCONFIG           = 0x100000
+	LUP_DUAL_ADDR            = 0x200000
+	LUP_FILESERVER           = 0x400000
+	LUP_DISABLE_IDN_ENCODING = 0x00800000
+	LUP_API_ANSI             = 0x01000000
+
+	LUP_RESOLUTION_HANDLE = 0x80000000
+)
+
+const (
+	// values of WSAQUERYSET's namespace
+	NS_ALL       = 0
+	NS_DNS       = 12
+	NS_NLA       = 15
+	NS_BTH       = 16
+	NS_EMAIL     = 37
+	NS_PNRPNAME  = 38
+	NS_PNRPCLOUD = 39
+)
+
 type DNSSRVData struct {
 	Target   *uint16
 	Priority uint16
@@ -3258,3 +3303,43 @@ const (
 	DWMWA_TEXT_COLOR                     = 36
 	DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37
 )
+
+type WSAQUERYSET struct {
+	Size                uint32
+	ServiceInstanceName *uint16
+	ServiceClassId      *GUID
+	Version             *WSAVersion
+	Comment             *uint16
+	NameSpace           uint32
+	NSProviderId        *GUID
+	Context             *uint16
+	NumberOfProtocols   uint32
+	AfpProtocols        *AFProtocols
+	QueryString         *uint16
+	NumberOfCsAddrs     uint32
+	SaBuffer            *CSAddrInfo
+	OutputFlags         uint32
+	Blob                *BLOB
+}
+
+type WSAVersion struct {
+	Version                 uint32
+	EnumerationOfComparison int32
+}
+
+type AFProtocols struct {
+	AddressFamily int32
+	Protocol      int32
+}
+
+type CSAddrInfo struct {
+	LocalAddr  SocketAddress
+	RemoteAddr SocketAddress
+	SocketType int32
+	Protocol   int32
+}
+
+type BLOB struct {
+	Size     uint32
+	BlobData *byte
+}
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index ac60052e..6d2a2685 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -474,6 +474,9 @@ var (
 	procWSAEnumProtocolsW                                    = modws2_32.NewProc("WSAEnumProtocolsW")
 	procWSAGetOverlappedResult                               = modws2_32.NewProc("WSAGetOverlappedResult")
 	procWSAIoctl                                             = modws2_32.NewProc("WSAIoctl")
+	procWSALookupServiceBeginW                               = modws2_32.NewProc("WSALookupServiceBeginW")
+	procWSALookupServiceEnd                                  = modws2_32.NewProc("WSALookupServiceEnd")
+	procWSALookupServiceNextW                                = modws2_32.NewProc("WSALookupServiceNextW")
 	procWSARecv                                              = modws2_32.NewProc("WSARecv")
 	procWSARecvFrom                                          = modws2_32.NewProc("WSARecvFrom")
 	procWSASend                                              = modws2_32.NewProc("WSASend")
@@ -4067,6 +4070,30 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo
 	return
 }
 
+func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSALookupServiceEnd(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
+	r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0)
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
 	r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
 	if r1 == socket_error {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ffbaae3c..71939a5f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -7,14 +7,16 @@ github.com/PuerkitoBio/urlesc
 # github.com/caarlos0/env/v6 v6.9.1
 ## explicit; go 1.17
 github.com/caarlos0/env/v6
-# github.com/cilium/ebpf v0.10.0
-## explicit; go 1.18
+# github.com/cilium/ebpf v0.11.0
+## explicit; go 1.19
 github.com/cilium/ebpf
 github.com/cilium/ebpf/asm
 github.com/cilium/ebpf/btf
 github.com/cilium/ebpf/internal
 github.com/cilium/ebpf/internal/epoll
+github.com/cilium/ebpf/internal/kconfig
 github.com/cilium/ebpf/internal/sys
+github.com/cilium/ebpf/internal/tracefs
 github.com/cilium/ebpf/internal/unix
 github.com/cilium/ebpf/link
 github.com/cilium/ebpf/ringbuf
@@ -249,6 +251,11 @@ golang.org/x/crypto/cryptobyte/asn1
 golang.org/x/crypto/curve25519
 golang.org/x/crypto/curve25519/internal/field
 golang.org/x/crypto/pbkdf2
+# golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
+## explicit; go 1.18
+golang.org/x/exp/constraints
+golang.org/x/exp/maps
+golang.org/x/exp/slices
 # golang.org/x/net v0.7.0
 ## explicit; go 1.17
 golang.org/x/net/context
@@ -265,7 +272,7 @@ golang.org/x/net/trace
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sys v0.5.0
+# golang.org/x/sys v0.6.0
 ## explicit; go 1.17
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
-- 
GitLab