diff --git a/go.mod b/go.mod
index b4c78c0a4f4a37b48a4aaeabdde3ff694efb1a65..cb81b26197979ca01bc8c68a7d2a9f511cfa79d2 100644
--- a/go.mod
+++ b/go.mod
@@ -16,14 +16,14 @@ require (
 	github.com/netobserv/gopipes v0.3.0
 	github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250227173154-57a2590a1d16
 	github.com/paulbellamy/ratecounter v0.2.0
-	github.com/prometheus/client_golang v1.21.1
+	github.com/prometheus/client_golang v1.22.0
 	github.com/segmentio/kafka-go v0.4.47
 	github.com/sirupsen/logrus v1.9.3
 	github.com/stretchr/testify v1.10.0
-	github.com/vishvananda/netlink v1.3.0
+	github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa
 	github.com/vishvananda/netns v0.0.5
-	github.com/vladimirvivien/gexe v0.4.1
-	github.com/vmware/go-ipfix v0.13.0
+	github.com/vladimirvivien/gexe v0.5.0
+	github.com/vmware/go-ipfix v0.14.0
 	golang.org/x/sys v0.32.0
 	google.golang.org/grpc v1.71.1
 	google.golang.org/protobuf v1.36.6
@@ -41,7 +41,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
-	github.com/cenkalti/hub v1.0.1 // indirect
+	github.com/cenkalti/hub v1.0.2 // indirect
 	github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
@@ -49,29 +49,31 @@ require (
 	github.com/coreos/go-iptables v0.6.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/dennwc/varint v1.0.0 // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
-	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
-	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
+	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
 	github.com/go-ini/ini v1.67.0 // indirect
 	github.com/go-kit/kit v0.13.0 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
-	github.com/go-logfmt/logfmt v0.5.1 // indirect
+	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.1 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
-	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/swag v0.23.1 // indirect
 	github.com/goccy/go-json v0.10.5 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/golang/snappy v1.0.0 // indirect
+	github.com/google/gnostic-models v0.6.9 // indirect
 	github.com/google/go-cmp v0.7.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/ip2location/ip2location-go/v9 v9.7.1 // indirect
@@ -80,11 +82,11 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
-	github.com/libp2p/go-reuseport v0.3.0 // indirect
-	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/libp2p/go-reuseport v0.4.0 // indirect
+	github.com/mailru/easyjson v0.9.0 // indirect
 	github.com/minio/crc64nvme v1.0.1 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/minio-go/v7 v7.0.89 // indirect
+	github.com/minio/minio-go/v7 v7.0.90 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -92,65 +94,68 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 // indirect
+	github.com/netobserv/loki-client-go v0.0.0-20250425113517-526b43e51847 // indirect
 	github.com/netsampler/goflow2 v1.3.7 // indirect
 	github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 // indirect
 	github.com/pierrec/lz4/v4 v4.1.22 // indirect
 	github.com/pion/dtls/v2 v2.2.12 // indirect
-	github.com/pion/logging v0.2.2 // indirect
+	github.com/pion/logging v0.2.3 // indirect
 	github.com/pion/transport/v2 v2.2.10 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.63.0 // indirect
+	github.com/prometheus/procfs v0.16.0 // indirect
+	github.com/prometheus/prometheus v0.303.0 // indirect
 	github.com/rs/xid v1.6.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 // indirect
+	github.com/safchain/ethtool v0.5.10 // indirect
 	github.com/spf13/cobra v1.9.1 // indirect
 	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
-	github.com/urfave/cli/v2 v2.27.2 // indirect
+	github.com/urfave/cli/v2 v2.27.6 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.1.2 // indirect
 	github.com/xdg-go/stringprep v1.0.4 // indirect
-	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
+	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/otel v1.35.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
 	go.opentelemetry.io/otel/metric v1.35.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.35.0 // indirect
 	go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
 	go.opentelemetry.io/otel/trace v1.35.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
-	golang.org/x/crypto v0.36.0 // indirect
-	golang.org/x/net v0.37.0 // indirect
-	golang.org/x/oauth2 v0.26.0 // indirect
-	golang.org/x/term v0.30.0 // indirect
-	golang.org/x/text v0.23.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
+	go.uber.org/atomic v1.11.0 // indirect
+	golang.org/x/crypto v0.37.0 // indirect
+	golang.org/x/net v0.39.0 // indirect
+	golang.org/x/oauth2 v0.29.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
+	golang.org/x/time v0.11.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250421163800-61c742ae3ef0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/gcfg.v1 v1.2.3 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/component-base v0.32.1 // indirect
+	k8s.io/component-base v0.32.3 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
 	k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
-	lukechampine.com/uint128 v1.2.0 // indirect
-	sigs.k8s.io/controller-runtime v0.20.0 // indirect
-	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	lukechampine.com/uint128 v1.3.0 // indirect
+	sigs.k8s.io/controller-runtime v0.20.4 // indirect
+	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
+
+replace github.com/netobserv/flowlogs-pipeline => github.com/jotak/flowlogs-pipeline v0.0.0-20250425114852-1cb601f3ba70
diff --git a/go.sum b/go.sum
index ca4c91e3952459cc17ba833c915c850f38c18a0f..08981aff53bd94d32c48efd196a3abeee2cc6ca8 100644
--- a/go.sum
+++ b/go.sum
@@ -1,143 +1,47 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
-github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
-github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
-github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
+cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
+cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
+cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
+cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
+cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/Knetic/govaluate v3.0.1-0.20250325060307-7625b7f8c03d+incompatible h1:PQkGQvISFXAw+Lkmcyd5OUGDVtdQdY1u0CIDjDbBg64=
 github.com/Knetic/govaluate v3.0.1-0.20250325060307-7625b7f8c03d+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
-github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
-github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
 github.com/agoda-com/opentelemetry-logs-go v0.6.0 h1:PdnNbW2a5vp4VWasIGVHJ85/4Eu0kZfLs3ySuitLN20=
 github.com/agoda-com/opentelemetry-logs-go v0.6.0/go.mod h1:zPrxWeyxZ8QRWJFNBFJ2zeWjJu0OuGG+Ow4KYEGEA5o=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
 github.com/alexflint/go-filemutex v1.2.0 h1:1v0TJPDtlhgpW4nJ+GvxCLSlUDC3+gW0CQQvlmfDR/s=
 github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c=
-github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
-github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
+github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
+github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
 github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
 github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
 github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA=
 github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
-github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA=
-github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg=
-github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs=
+github.com/cenkalti/hub v1.0.2 h1:Nqv9TNaA9boeO2wQFW8o87BY3zKthtnzXmWGmJqhAV8=
+github.com/cenkalti/hub v1.0.2/go.mod h1:8LAFAZcCasb83vfxatMUnZHRoQcffho2ELpHb+kaTJU=
 github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ=
 github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -145,105 +49,46 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.18.0 h1:OsSwqS4y+gQHxaKgg2U/+Fev834kdnsQbtzRnbVC6Gs=
 github.com/cilium/ebpf v0.18.0/go.mod h1:vmsAT73y4lW2b4peE+qcOqw6MxvWQdC+LiU5gd/xyo4=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=
 github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
 github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk=
 github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
-github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
+github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
-github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
 github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
+github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
 github.com/gaissmai/cidrtree v0.1.4 h1:/aYnv1LIwjtSDHNr1eNN99WJeh6vLrB+Sgr1tRMhHDc=
 github.com/gaissmai/cidrtree v0.1.4/go.mod h1:nrjEeeMZmvoJpLcSvZ3qIVFxw/+9GHKi7wDHHmHKGRI=
 github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 h1:Vh7rylVZRZCj6W41lRlP17xPk4Nq260H4Xo/DDYmEZk=
 github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
-github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
 github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
 github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
 github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
+github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -251,302 +96,95 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
-github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
-github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
-github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
-github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
-github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
 github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
 github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
-github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
-github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
-github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
-github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
-github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
-github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
-github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
-github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
-github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
-github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
-github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
-github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
-github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
-github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
-github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
 github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s=
 github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
 github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
-github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
 github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/enterprise-certificate-proxy v0.3.5 h1:VgzTY2jogw3xt39CusEnFJWm7rlsq5yL5q9XdLOuP5g=
+github.com/googleapis/enterprise-certificate-proxy v0.3.5/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
 github.com/gopacket/gopacket v1.3.1 h1:ZppWyLrOJNZPe5XkdjLbtuTkfQoxQ0xyMJzQCqtqaPU=
 github.com/gopacket/gopacket v1.3.1/go.mod h1:3I13qcqSpB2R9fFQg866OOgzylYkZxLTmkvcXhvf6qg=
-github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg=
-github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
 github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb h1:tsEKRC3PU9rMw18w/uAptoijhgG4EvlA5kfJPtwrMDk=
 github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb/go.mod h1:NtmN9h8vrTveVQRLHcX2HQ5wIPBDCsZ351TGbZWgg38=
-github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
-github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
-github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
-github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
-github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
-github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
-github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
-github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
 github.com/ip2location/ip2location-go/v9 v9.7.1 h1:eXu/DqS13QE0h1Yrc9oji+6/anLD9KDf6Ulf5GdIQs8=
 github.com/ip2location/ip2location-go/v9 v9.7.1/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
 github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/jotak/flowlogs-pipeline v0.0.0-20250425114852-1cb601f3ba70 h1:y+9UvpXHrRsx67ssIadqwjlTSdkvzU/wRSeIMpVT2bg=
+github.com/jotak/flowlogs-pipeline v0.0.0-20250425114852-1cb601f3ba70/go.mod h1:pBj/E/kEWH++a6QniTo6C14qunYsyuurNi0Q8+2+7IM=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
 github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
 github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4=
 github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U=
 github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g=
@@ -557,74 +195,26 @@ github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:B
 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0/go.mod h1:wxt2YWRVItDtaQmVSmaN5ubE2L1c9CiNoHQwSJnM8Ko=
 github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc h1:v6+jUd70AayPbIRgTYUNpnBLG5cBPTY0+10y80CZeMk=
 github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc/go.mod h1:jyWzGe6ZtYiPq6ih6aXCOy6mZ49Y9mNyBOLBBXnli+k=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
 github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
-github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
 github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
-github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
-github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw=
-github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI=
-github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
-github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/mariomac/guara v0.0.0-20250408105519-1e4dbdfb7136 h1:SOKpjp57SUaZeXPA+wIXTIDByfs65cr1FamFsjzT8Ic=
 github.com/mariomac/guara v0.0.0-20250408105519-1e4dbdfb7136/go.mod h1:Yolpa1FCtmN9py66WkFE+6xI2ZlRE89zkLeaowPc/g0=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875 h1:ql8x//rJsHMjS+qqEag8n3i4azw1QneKh5PieH9UEbY=
 github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875/go.mod h1:kfOoFJuHWp76v1RgZCb9/gVUc7XdY877S2uVYbNliGc=
 github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
@@ -640,30 +230,16 @@ github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U
 github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
 github.com/metallb/frr-k8s v0.0.15 h1:6M3UGhovX1EFoaSGjrRD7djUAx3w2I+g81FH8OVtHkM=
 github.com/metallb/frr-k8s v0.0.15/go.mod h1:TjrGoAf+v00hYGlI8jUdyDxY5udMAOs2GWwrvLWnA4E=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
-github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
+github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
 github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
 github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.89 h1:hx4xV5wwTUfyv8LarhJAwNecnXpoTsj9v3f3q/ZkiJU=
-github.com/minio/minio-go/v7 v7.0.89/go.mod h1:2rFnGAp02p7Dddo1Fq4S2wYOfpF0MUTSeLTRC90I204=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/minio/minio-go/v7 v7.0.90 h1:TmSj1083wtAD0kEYTx7a5pFsv3iRYMsOJ6A4crjA1lE=
+github.com/minio/minio-go/v7 v7.0.90/go.mod h1:uvMUcGrpgeSAAI6+sD3818508nUyMULw94j2Nxku/Go=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
@@ -673,250 +249,118 @@ github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVO
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
-github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
-github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20250407055901-c9a2490f9235 h1:dtlec186OHsjmURenkq/f/e/rCD13IoS6Mb/AdMCcyo=
-github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20250407055901-c9a2490f9235/go.mod h1:aEVNh9pj/asv6qKNKoTHctkCcQ2RDHCilIslAV1Q30M=
 github.com/netobserv/gopipes v0.3.0 h1:IYmPnnAVCdSK7VmHmpFhrVBOEm45qpgbZmJz1sSW+60=
 github.com/netobserv/gopipes v0.3.0/go.mod h1:N7/Gz05EOF0CQQSKWsv3eof22Cj2PB08Pbttw98YFYU=
-github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 h1:RmnoJe/ci5q+QdM7upFdxiU+D8F3L3qTd5wXCwwHefw=
-github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500/go.mod h1:LHXpc5tjKvsfZn0pwLKrvlgEhZcCaw3Di9mUEZGAI4E=
+github.com/netobserv/loki-client-go v0.0.0-20250425113517-526b43e51847 h1:hjzhVZSSKIOmAzHbGUV4JhVIPkgKs/UtrWDx6JSVKMw=
+github.com/netobserv/loki-client-go v0.0.0-20250425113517-526b43e51847/go.mod h1:Zb/jtD3Lnu88Poo+jnhTASzxYnvncmHOoZaT93xQjJ8=
 github.com/netsampler/goflow2 v1.3.7 h1:XZaTy8kkMnGXpJ9hS3KbO1McyrFTpVNhVFEx9rNhMmc=
 github.com/netsampler/goflow2 v1.3.7/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
+github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
+github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
 github.com/openshift/api v0.0.0-20231120222239-b86761094ee3 h1:nLhV2lbWrJ3E3hx0/97G3ZZvppC67cNwo+CLp7/PAbA=
 github.com/openshift/api v0.0.0-20231120222239-b86761094ee3/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU=
 github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a h1:4FVrw8hz0Wb3izbf6JfOEK+pJTYpEvteRR73mCh2g/A=
 github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a/go.mod h1:arApQobmOjZqtxw44TwnQdUCH+t9DgZ8geYPFqksHws=
-github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
-github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
-github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
 github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs=
 github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI=
 github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250227173154-57a2590a1d16 h1:t4NphP6IIFRe5/2NGc1MD0e72pLYIzaG9YizrYyk84Y=
 github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250227173154-57a2590a1d16/go.mod h1:MzFM3OEsLM2w/4MBMOCsxGR6ZBUvJfOxvQHB8LIKSv4=
-github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
 github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
-github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
-github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
-github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
-github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
-github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
 github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
 github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
 github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
-github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
 github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
+github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
 github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
 github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
 github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
 github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
 github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
-github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
-github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 h1:V/4Cj2GytqdqK7OMEz6c4LNjey3SNyfw3pg5jPKtJvQ=
-github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
+github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
+github.com/prometheus/prometheus v0.303.0 h1:wsNNsbd4EycMCphYnTmNY9JASBVbp7NWwJna857cGpA=
+github.com/prometheus/prometheus v0.303.0/go.mod h1:8PMRi+Fk1WzopMDeb0/6hbNs9nV6zgySkU/zds5Lu3o=
+github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8=
+github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
 github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 h1:q815fjV3G+4JvXNo2VwT2m+/msMU0sUkCK68CgHV9Y8=
-github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91/go.mod h1:qIWCTaK0xQlXNlNlIVoZjKMZFopqfMZcg4JcRqGoYc0=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
-github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
-github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
-github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/safchain/ethtool v0.5.10 h1:Im294gZtuf4pSGJRAOGKaASNi3wMeFaGaWuSaomedpc=
+github.com/safchain/ethtool v0.5.10/go.mod h1:w9jh2Lx7YBR4UwzLkzCmWl85UY0W2uZdd7/DckVE5+c=
+github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
+github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
 github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
 github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
-github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
+github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
 github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
 github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
-github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
-github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -925,27 +369,17 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
-github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
-github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
-github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
-github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
+github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
+github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
+github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa h1:iAhToRwOrdk+pKzclvLM7nKZhsg8f7dVrgkFccDUbUw=
+github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
 github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
 github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
 github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
-github.com/vladimirvivien/gexe v0.4.1 h1:W9gWkp8vSPjDoXDu04Yp4KljpVMaSt8IQuHswLDd5LY=
-github.com/vladimirvivien/gexe v0.4.1/go.mod h1:3gjgTqE2c0VyHnU5UOIwk7gyNzZDGulPb/DJPgcw64E=
-github.com/vmware/go-ipfix v0.13.0 h1:v3paBzd7oq7LEU1SzDwD5RGoYcGROLQycYyN3EzLvDk=
-github.com/vmware/go-ipfix v0.13.0/go.mod h1:UTIR38AuEePzrWYjQOvnORCYRG33xZJ56E0K75mSosM=
-github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/vladimirvivien/gexe v0.5.0 h1:AWBVaYnrTsGYBktXvcO0DfWPeSiZxn6mnQ5nvL+A1/A=
+github.com/vladimirvivien/gexe v0.5.0/go.mod h1:3gjgTqE2c0VyHnU5UOIwk7gyNzZDGulPb/DJPgcw64E=
+github.com/vmware/go-ipfix v0.14.0 h1:Flz77R8mjD7QfAE56rGqKuI6Xc8O8BZ8kToyu8mA0rM=
+github.com/vmware/go-ipfix v0.14.0/go.mod h1:HagsZnswXBEglXBYL5NCAkRk35OETTpYwnMoBsT7Qp8=
 github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
@@ -955,34 +389,15 @@ github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
 github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
 github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
 github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
-github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
-github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
-github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
-go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
 go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
 go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
@@ -991,10 +406,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NI
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
 go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
 go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
 go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
@@ -1005,229 +420,72 @@ go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt
 go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
 go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
 go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
 golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
-golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
-golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
+golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
-golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
-golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
+golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
+golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1241,6 +499,7 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
 golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1250,12 +509,9 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -1265,178 +521,29 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
-golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
-gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
-google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/api v0.224.0 h1:Ir4UPtDsNiwIOHdExr3fAj4xZ42QjK7uQte3lORLJwU=
+google.golang.org/api v0.224.0/go.mod h1:3V39my2xAGkodXy0vEqcEtkqgw2GtrFL5WuBZlCTCOQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20250421163800-61c742ae3ef0 h1:bphwUhSYYbcKacmc2crgiMvwARwqeNCtAI5g1PohT34=
+google.golang.org/genproto/googleapis/api v0.0.0-20250421163800-61c742ae3ef0/go.mod h1:Cd8IzgPo5Akum2c9R6FsXNaZbH3Jpa2gpHlW89FqlyQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
 google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
 google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -1444,25 +551,16 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
 google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
 google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY=
 gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
 gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1472,83 +570,51 @@ gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=
 gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
-gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
 gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
 k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
 k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
-k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0=
-k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw=
-k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
+k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw=
+k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto=
 k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
 k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
 k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
 k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
-k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk=
-k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
+k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
-k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
 k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
 k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
 lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/controller-runtime v0.20.0 h1:jjkMo29xEXH+02Md9qaVXfEIaMESSpy3TBWPrsfQkQs=
-sigs.k8s.io/controller-runtime v0.20.0/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU=
+lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
+lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
+sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
 sigs.k8s.io/e2e-framework v0.6.0 h1:p7hFzHnLKO7eNsWGI2AbC1Mo2IYxidg49BiT4njxkrM=
 sigs.k8s.io/e2e-framework v0.6.0/go.mod h1:IREnCHnKgRCioLRmNi0hxSJ1kJ+aAdjEKK/gokcZu4k=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/network-policy-api v0.1.5 h1:xyS7VAaM9EfyB428oFk7WjWaCK6B129i+ILUF4C8l6E=
 sigs.k8s.io/network-policy-api v0.1.5/go.mod h1:D7Nkr43VLNd7iYryemnj8qf0N/WjBzTZDxYA+g4u1/Y=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
+sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml
index b05e4c53fa829889472c8bc28e83f1fb6df8406b..d8cecb0dfe951490fbb9db68b90bd774269e08bf 100644
--- a/vendor/github.com/cenkalti/hub/.travis.yml
+++ b/vendor/github.com/cenkalti/hub/.travis.yml
@@ -1,3 +1,5 @@
 language: go
-go: 1.2
-
+go: 1.13
+arch:
+   - amd64
+   - ppc64le
diff --git a/vendor/github.com/dennwc/varint/.gitignore b/vendor/github.com/dennwc/varint/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..9385b6db18ba2a7f947abf24e4add8786fe4335b
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/.gitignore
@@ -0,0 +1,2 @@
+*.o
+*.txt
\ No newline at end of file
diff --git a/vendor/github.com/dennwc/varint/.travis.yml b/vendor/github.com/dennwc/varint/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b3da258f520ef8f76767cf1989080989dc3dfedc
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+  - 1.12.x
+
+env:
+  - GO111MODULE=on
\ No newline at end of file
diff --git a/vendor/github.com/dennwc/varint/LICENSE b/vendor/github.com/dennwc/varint/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8b3f68715c1b0729a4d0697596ce77691251891d
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Denys Smirnov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dennwc/varint/README.md b/vendor/github.com/dennwc/varint/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fe15b3b5003869071197f94bb9a7b488837f34e4
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/README.md
@@ -0,0 +1,47 @@
+# varint
+
+This package provides an optimized implementation of protobuf's varint encoding/decoding.
+It has no dependencies.
+
+Benchmarks compared to `binary.Uvarint`:
+
+```
+benchmark                      old ns/op     new ns/op     delta
+BenchmarkUvarint/1-8           4.13          2.85          -30.99%
+BenchmarkUvarint/1_large-8     4.01          2.28          -43.14%
+BenchmarkUvarint/2-8           6.23          2.87          -53.93%
+BenchmarkUvarint/2_large-8     5.60          2.86          -48.93%
+BenchmarkUvarint/3-8           6.55          3.44          -47.48%
+BenchmarkUvarint/3_large-8     6.54          2.86          -56.27%
+BenchmarkUvarint/4-8           7.30          3.71          -49.18%
+BenchmarkUvarint/4_large-8     7.46          3.10          -58.45%
+BenchmarkUvarint/5-8           8.31          4.12          -50.42%
+BenchmarkUvarint/5_large-8     8.56          3.48          -59.35%
+BenchmarkUvarint/6-8           9.42          4.66          -50.53%
+BenchmarkUvarint/6_large-8     9.91          4.07          -58.93%
+BenchmarkUvarint/7-8           10.6          5.28          -50.19%
+BenchmarkUvarint/7_large-8     11.0          4.70          -57.27%
+BenchmarkUvarint/8-8           11.7          6.02          -48.55%
+BenchmarkUvarint/8_large-8     12.1          5.19          -57.11%
+BenchmarkUvarint/9-8           12.9          6.83          -47.05%
+BenchmarkUvarint/9_large-8     13.1          5.71          -56.41%
+```
+
+It also provides additional functionality like `UvarintSize` (similar to `sov*` in `gogo/protobuf`):
+
+```
+benchmark                    old ns/op     new ns/op     delta
+BenchmarkUvarintSize/1-8     1.71          0.43          -74.85%
+BenchmarkUvarintSize/2-8     2.56          0.57          -77.73%
+BenchmarkUvarintSize/3-8     3.22          0.72          -77.64%
+BenchmarkUvarintSize/4-8     3.74          0.72          -80.75%
+BenchmarkUvarintSize/5-8     4.29          0.57          -86.71%
+BenchmarkUvarintSize/6-8     4.85          0.58          -88.04%
+BenchmarkUvarintSize/7-8     5.43          0.71          -86.92%
+BenchmarkUvarintSize/8-8     6.01          0.86          -85.69%
+BenchmarkUvarintSize/9-8     6.64          1.00          -84.94%
+```
+
+# License
+
+MIT
\ No newline at end of file
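
The benchmarks above position `varint.Uvarint` as a faster drop-in for `binary.Uvarint`. A minimal sketch of that drop-in usage, assuming the import path `github.com/dennwc/varint` (the value and byte counts are illustrative):

```Go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/dennwc/varint"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 1234567)

	// Both decoders return the same (value, bytes-read) pair;
	// varint.Uvarint is the unrolled, faster variant.
	v1, n1 := binary.Uvarint(buf[:n])
	v2, n2 := varint.Uvarint(buf[:n])
	fmt.Println(v1, n1) // 1234567 3
	fmt.Println(v2, n2) // 1234567 3
}
```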
diff --git a/vendor/github.com/dennwc/varint/proto.go b/vendor/github.com/dennwc/varint/proto.go
new file mode 100644
index 0000000000000000000000000000000000000000..e3b458547f9f47420b7283ec1ee0e8eeba0a93c2
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/proto.go
@@ -0,0 +1,244 @@
+package varint
+
+// ProtoTag decodes a protobuf field number and wire type pair
+// from buf and returns them along with the number of bytes read (> 0).
+// If an error occurred, n = 0 is returned.
+func ProtoTag(buf []byte) (num int, typ byte, n int) {
+	// Same unrolled implementation as in Uvarint.
+	//
+	// But this time we can check if the wire type and field num
+	// are valid when reading the first byte.
+	//
+	// Also, the shifts are now different, because the first 3 bits
+	// are for the wire type.
+	//
+	// The implementation will stop at 9 bytes, returning an error.
+	sz := len(buf)
+	if sz == 0 {
+		return 0, 0, 0
+	}
+	const (
+		bit  = 1 << 7
+		mask = bit - 1
+		step = 7
+
+		// protobuf
+		typBits = 3
+		typMask = 1<<3 - 1
+	)
+	if sz >= 9 { // no bound checks
+		// i == 0
+		b := buf[0]
+		if b == 0 {
+			return 0, 0, 0
+		}
+		typ = b & typMask
+		if typ > 5 {
+			return 0, 0, 0
+		}
+		if b < bit {
+			num = int(b >> typBits)
+			if num == 0 {
+				return 0, 0, 0
+			}
+			n = 1
+			return
+		}
+		num = int((b & mask) >> typBits)
+		var s uint = step - typBits
+
+		// i == 1
+		b = buf[1]
+		if b < bit {
+			num |= int(b) << s
+			n = 2
+			return
+		}
+		num |= int(b&mask) << s
+		s += step
+
+		// i == 2
+		b = buf[2]
+		if b < bit {
+			num |= int(b) << s
+			n = 3
+			return
+		}
+		num |= int(b&mask) << s
+		s += step
+
+		// i == 3
+		b = buf[3]
+		if b < bit {
+			num |= int(b) << s
+			n = 4
+			return
+		}
+		num |= int(b&mask) << s
+		s += step
+
+		// i == 4
+		b = buf[4]
+		if b < bit {
+			num |= int(b) << s
+			n = 5
+			return
+		}
+		num |= int(b&mask) << s
+		s += step
+
+		// i == 5
+		b = buf[5]
+		if b < bit {
+			num |= int(b) << s
+			n = 6
+			return
+		}
+		num |= int(b&mask) << s
+		s += step
+
+		// i == 6
+		b = buf[6]
+		if b < bit {
+			num |= int(b) << s
+			n = 7
+			return
+		}
+		num |= int(b&mask) << s
+		s += step
+
+		// i == 7
+		b = buf[7]
+		if b < bit {
+			num |= int(b) << s
+			n = 8
+			return
+		}
+		num |= int(b&mask) << s
+		s += step
+
+		// i == 8
+		b = buf[8]
+		if b < bit {
+			num |= int(b) << s
+			n = 9
+			return
+		}
+		return 0, 0, 0 // too much
+	}
+
+	// i == 0
+	b := buf[0]
+	if b == 0 {
+		return 0, 0, 0
+	}
+	typ = b & typMask
+	if typ > 5 {
+		return 0, 0, 0
+	}
+	if b < bit {
+		num = int(b >> typBits)
+		if num == 0 {
+			return 0, 0, 0
+		}
+		n = 1
+		return
+	} else if sz == 1 {
+		return 0, 0, 0
+	}
+	num = int((b & mask) >> typBits)
+	var s uint = step - typBits
+
+	// i == 1
+	b = buf[1]
+	if b < bit {
+		num |= int(b) << s
+		n = 2
+		return
+	} else if sz == 2 {
+		return 0, 0, 0
+	}
+	num |= int(b&mask) << s
+	s += step
+
+	// i == 2
+	b = buf[2]
+	if b < bit {
+		num |= int(b) << s
+		n = 3
+		return
+	} else if sz == 3 {
+		return 0, 0, 0
+	}
+	num |= int(b&mask) << s
+	s += step
+
+	// i == 3
+	b = buf[3]
+	if b < bit {
+		num |= int(b) << s
+		n = 4
+		return
+	} else if sz == 4 {
+		return 0, 0, 0
+	}
+	num |= int(b&mask) << s
+	s += step
+
+	// i == 4
+	b = buf[4]
+	if b < bit {
+		num |= int(b) << s
+		n = 5
+		return
+	} else if sz == 5 {
+		return 0, 0, 0
+	}
+	num |= int(b&mask) << s
+	s += step
+
+	// i == 5
+	b = buf[5]
+	if b < bit {
+		num |= int(b) << s
+		n = 6
+		return
+	} else if sz == 6 {
+		return 0, 0, 0
+	}
+	num |= int(b&mask) << s
+	s += step
+
+	// i == 6
+	b = buf[6]
+	if b < bit {
+		num |= int(b) << s
+		n = 7
+		return
+	} else if sz == 7 {
+		return 0, 0, 0
+	}
+	num |= int(b&mask) << s
+	s += step
+
+	// i == 7
+	b = buf[7]
+	if b < bit {
+		num |= int(b) << s
+		n = 8
+		return
+	} else if sz == 8 {
+		return 0, 0, 0
+	}
+	num |= int(b&mask) << s
+	s += step
+
+	// i == 8
+	b = buf[8]
+	if b < bit {
+		num |= int(b) << s
+		n = 9
+		return
+	}
+	return 0, 0, 0 // too much
+}
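
`ProtoTag` folds the wire-type validity check into the same unrolled decode loop. A short sketch of its contract, assuming the import path `github.com/dennwc/varint` (byte values are illustrative):

```Go
package main

import (
	"fmt"

	"github.com/dennwc/varint"
)

func main() {
	// 0x08 = field number 1, wire type 0 (varint): the typical first
	// byte of a protobuf message whose field 1 is an integer.
	num, typ, n := varint.ProtoTag([]byte{0x08})
	fmt.Println(num, typ, n) // 1 0 1

	// n == 0 signals an error: empty buffer, wire type > 5,
	// field number 0, or a tag longer than 9 bytes.
	_, _, n = varint.ProtoTag([]byte{0x07}) // wire type 7 is invalid
	fmt.Println(n)                          // 0
}
```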
diff --git a/vendor/github.com/dennwc/varint/varint.go b/vendor/github.com/dennwc/varint/varint.go
new file mode 100644
index 0000000000000000000000000000000000000000..83278c2d7d2c6c98392d50772ab34bdb67ab7ce8
--- /dev/null
+++ b/vendor/github.com/dennwc/varint/varint.go
@@ -0,0 +1,270 @@
+package varint
+
+const maxUint64 = uint64(1<<64 - 1)
+
+// MaxLenN is the maximum length of a varint-encoded N-bit integer.
+const (
+	MaxLen8  = 2
+	MaxLen16 = 3
+	MaxLen32 = 5
+	MaxLen64 = 10
+)
+
+// MaxValN is the maximum varint-encoded integer that fits in N bytes.
+const (
+	MaxVal9 = maxUint64 >> (1 + iota*7)
+	MaxVal8
+	MaxVal7
+	MaxVal6
+	MaxVal5
+	MaxVal4
+	MaxVal3
+	MaxVal2
+	MaxVal1
+)
+
+// UvarintSize returns the number of bytes necessary to encode a given uint.
+func UvarintSize(x uint64) int {
+	if x <= MaxVal4 {
+		if x <= MaxVal1 {
+			return 1
+		} else if x <= MaxVal2 {
+			return 2
+		} else if x <= MaxVal3 {
+			return 3
+		}
+		return 4
+	}
+	if x <= MaxVal5 {
+		return 5
+	} else if x <= MaxVal6 {
+		return 6
+	} else if x <= MaxVal7 {
+		return 7
+	} else if x <= MaxVal8 {
+		return 8
+	} else if x <= MaxVal9 {
+		return 9
+	}
+	return 10
+}
+
+// Uvarint decodes a uint64 from buf and returns that value and the
+// number of bytes read (> 0). If an error occurred, the value is 0
+// and the number of bytes n is <= 0 meaning:
+//
+// 	n == 0: buf too small
+// 	n  < 0: value larger than 64 bits (overflow)
+// 	        and -n is the number of bytes read
+//
+func Uvarint(buf []byte) (uint64, int) {
+	// Fully unrolled implementation of binary.Uvarint.
+	//
+	// It will also eliminate bound checks for buffers larger than 9 bytes.
+	sz := len(buf)
+	if sz == 0 {
+		return 0, 0
+	}
+	const (
+		step = 7
+		bit  = 1 << 7
+		mask = bit - 1
+	)
+	if sz >= 10 { // no bound checks
+		// i == 0
+		b := buf[0]
+		if b < bit {
+			return uint64(b), 1
+		}
+		x := uint64(b & mask)
+		var s uint = step
+
+		// i == 1
+		b = buf[1]
+		if b < bit {
+			return x | uint64(b)<<s, 2
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 2
+		b = buf[2]
+		if b < bit {
+			return x | uint64(b)<<s, 3
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 3
+		b = buf[3]
+		if b < bit {
+			return x | uint64(b)<<s, 4
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 4
+		b = buf[4]
+		if b < bit {
+			return x | uint64(b)<<s, 5
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 5
+		b = buf[5]
+		if b < bit {
+			return x | uint64(b)<<s, 6
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 6
+		b = buf[6]
+		if b < bit {
+			return x | uint64(b)<<s, 7
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 7
+		b = buf[7]
+		if b < bit {
+			return x | uint64(b)<<s, 8
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 8
+		b = buf[8]
+		if b < bit {
+			return x | uint64(b)<<s, 9
+		}
+		x |= uint64(b&mask) << s
+		s += step
+
+		// i == 9
+		b = buf[9]
+		if b < bit {
+			if b > 1 {
+				return 0, -10 // overflow
+			}
+			return x | uint64(b)<<s, 10
+		} else if sz == 10 {
+			return 0, 0
+		}
+		for j, b := range buf[10:] {
+			if b < bit {
+				return 0, -(11 + j)
+			}
+		}
+		return 0, 0
+	}
+
+	// i == 0
+	b := buf[0]
+	if b < bit {
+		return uint64(b), 1
+	} else if sz == 1 {
+		return 0, 0
+	}
+	x := uint64(b & mask)
+	var s uint = step
+
+	// i == 1
+	b = buf[1]
+	if b < bit {
+		return x | uint64(b)<<s, 2
+	} else if sz == 2 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 2
+	b = buf[2]
+	if b < bit {
+		return x | uint64(b)<<s, 3
+	} else if sz == 3 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 3
+	b = buf[3]
+	if b < bit {
+		return x | uint64(b)<<s, 4
+	} else if sz == 4 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 4
+	b = buf[4]
+	if b < bit {
+		return x | uint64(b)<<s, 5
+	} else if sz == 5 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 5
+	b = buf[5]
+	if b < bit {
+		return x | uint64(b)<<s, 6
+	} else if sz == 6 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 6
+	b = buf[6]
+	if b < bit {
+		return x | uint64(b)<<s, 7
+	} else if sz == 7 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 7
+	b = buf[7]
+	if b < bit {
+		return x | uint64(b)<<s, 8
+	} else if sz == 8 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 8
+	b = buf[8]
+	if b < bit {
+		return x | uint64(b)<<s, 9
+	} else if sz == 9 {
+		return 0, 0
+	}
+	x |= uint64(b&mask) << s
+	s += step
+
+	// i == 9
+	b = buf[9]
+	if b < bit {
+		if b > 1 {
+			return 0, -10 // overflow
+		}
+		return x | uint64(b)<<s, 10
+	} else if sz == 10 {
+		return 0, 0
+	}
+	for j, b := range buf[10:] {
+		if b < bit {
+			return 0, -(11 + j)
+		}
+	}
+	return 0, 0
+}
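
A quick sketch of the error signaling documented on `Uvarint`, together with `UvarintSize`; the inputs below are illustrative:

```Go
package main

import (
	"fmt"

	"github.com/dennwc/varint"
)

func main() {
	// 300 encodes as the two varint bytes {0xAC, 0x02}.
	v, n := varint.Uvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n) // 300 2

	// Truncated input: n == 0 means the buffer was too small.
	_, n = varint.Uvarint([]byte{0xAC})
	fmt.Println(n) // 0

	// UvarintSize reports the encoded length without encoding.
	fmt.Println(varint.UvarintSize(300)) // 2
}
```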
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 92b78048e23d46a0f3a61625faf933116339ab31..6f24dfff562ecb95ad3811265889661d8a1a65d9 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,5 +1,8 @@
 # Change history of go-restful
 
+## [v3.12.2] - 2025-02-21
+
+- allow empty payloads in POST, PUT, PATCH; issue #580 (thanks @liggitt, Jordan Liggitt)
 
 ## [v3.12.1] - 2024-05-28
 
@@ -18,7 +21,7 @@
 
 - fix by restoring custom JSON handler functions (Mike Beaumont #540)
 
-## [v3.12.0] - 2023-08-19
+## [v3.11.0] - 2023-08-19
 
 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
 
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index 7234604e47b8d85f314121c6ec6cca970c32b4f8..3fb40d198087a45263d5bfb9b360479d47dd348f 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -3,7 +3,7 @@ go-restful
 package for building REST-style Web Services using Google Go
 
 [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
-[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
+[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
 [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
 
 - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
index a9b3faaa81fabb324a1463b47c306c174e6ac498..7f04bd90533608703a6dc24fb3b1223b2c138531 100644
--- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go
+++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
@@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
 	return params
 }
 
-// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
 func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
 	candidates := make([]*Route, 0, 8)
 	for i, each := range routes {
@@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
 		if trace {
 			traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
 		}
-		if httpRequest.ContentLength > 0 {
-			return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
-		}
+		return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
 	}
 
 	// accept
@@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
 		for _, candidate := range previous {
 			available = append(available, candidate.Produces...)
 		}
-		// if POST,PUT,PATCH without body
-		method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
-		if (method == http.MethodPost ||
-			method == http.MethodPut ||
-			method == http.MethodPatch) && (length == "" || length == "0") {
-			return nil, NewError(
-				http.StatusUnsupportedMediaType,
-				fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
-			)
-		}
 		return nil, NewError(
 			http.StatusNotAcceptable,
-			fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
-		)
+			fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
 	}
 	// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
 	return candidates[0], nil
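
The two removed branches simplify `detectRoute`'s error paths: a Content-Type that matches no candidate route now returns 415 regardless of body length, and a failed Accept negotiation returns a plain 406 rather than the old 415 special case for body-less POST, PUT, and PATCH. A minimal service sketch these checks would run against; the path and handler are illustrative:

```Go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/things").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	// A POST here whose Content-Type matches no route now gets 415 even
	// when the body is empty; a failed Accept match gets a plain 406.
	ws.Route(ws.POST("").To(func(req *restful.Request, resp *restful.Response) {
		resp.WriteHeader(http.StatusCreated)
	}))

	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```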
diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go
index 306c44be7794df895efed0686ec2a3c373f34b33..a2056e2acbbc34444b47338ad571d02002557cd5 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route.go
@@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
 }
 
 // Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+// If the route does not specify Consumes then return true (*/*).
+// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
 func (r Route) matchesContentType(mimeTypes string) bool {
 
 	if len(r.Consumes) == 0 {
diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go
index f79caf3135a645176ed6f9edef94ac73152321fb..d60afadcf1e846d6dc51da06aa737a2f1b9925d8 100644
--- a/vendor/github.com/evanphx/json-patch/v5/merge.go
+++ b/vendor/github.com/evanphx/json-patch/v5/merge.go
@@ -103,8 +103,8 @@ func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray {
 	return ary
 }
 
-var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
-var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
 var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
 
 // MergeMergePatches merges two merge patches together, such that
@@ -121,11 +121,11 @@ func MergePatch(docData, patchData []byte) ([]byte, error) {
 
 func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 	if !json.Valid(docData) {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	if !json.Valid(patchData) {
-		return nil, errBadJSONPatch
+		return nil, ErrBadJSONPatch
 	}
 
 	options := NewApplyOptions()
@@ -143,7 +143,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 	patchErr := patch.UnmarshalJSON(patchData)
 
 	if isSyntaxError(docErr) {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	if isSyntaxError(patchErr) {
@@ -151,7 +151,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 	}
 
 	if docErr == nil && doc.obj == nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	if patchErr == nil && patch.obj == nil {
@@ -175,7 +175,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 				if json.Valid(patchData) {
 					return patchData, nil
 				}
-				return nil, errBadJSONPatch
+				return nil, ErrBadJSONPatch
 			}
 
 			pruneAryNulls(patchAry, options)
@@ -183,7 +183,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 			out, patchErr := json.Marshal(patchAry.nodes)
 
 			if patchErr != nil {
-				return nil, errBadJSONPatch
+				return nil, ErrBadJSONPatch
 			}
 
 			return out, nil
@@ -256,12 +256,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
 
 	err := unmarshal(originalJSON, &originalDoc)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	err = unmarshal(modifiedJSON, &modifiedDoc)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	dest, err := getDiff(originalDoc, modifiedDoc)
@@ -286,17 +286,17 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
 
 	err := unmarshal(originalJSON, &originalDocs)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	err = unmarshal(modifiedJSON, &modifiedDocs)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	total := len(originalDocs)
 	if len(modifiedDocs) != total {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}
 
 	result := []json.RawMessage{}
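
Exporting `ErrBadJSONDoc` and `ErrBadJSONPatch` lets callers match the failure mode with `errors.Is` instead of comparing message strings. A sketch of the caller side (the malformed input is illustrative):

```Go
package main

import (
	"errors"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	// Malformed document: MergePatch fails fast with the exported sentinel.
	_, err := jsonpatch.MergePatch([]byte(`{not json`), []byte(`{"a":1}`))
	if errors.Is(err, jsonpatch.ErrBadJSONDoc) {
		fmt.Println("invalid JSON document")
	}
}
```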
diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go
index 7a7f71c8b66b003147f8d2fa9fd016608d0f1910..83102e5570ad921d60efd36b789096af9a2aaa3f 100644
--- a/vendor/github.com/evanphx/json-patch/v5/patch.go
+++ b/vendor/github.com/evanphx/json-patch/v5/patch.go
@@ -2,13 +2,13 @@ package jsonpatch
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
 	"unicode"
 
 	"github.com/evanphx/json-patch/v5/internal/json"
-	"github.com/pkg/errors"
 )
 
 const (
@@ -461,7 +461,7 @@ func (o Operation) Path() (string, error) {
 		return op, nil
 	}
 
-	return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+	return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing)
 }
 
 // From reads the "from" field of the Operation.
@@ -478,7 +478,7 @@ func (o Operation) From() (string, error) {
 		return op, nil
 	}
 
-	return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+	return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing)
 }
 
 func (o Operation) value() *lazyNode {
@@ -511,7 +511,7 @@ func (o Operation) ValueInterface() (interface{}, error) {
 		return v, nil
 	}
 
-	return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+	return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing)
 }
 
 func isArray(buf []byte) bool {
@@ -610,7 +610,7 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
 
 	v, ok := d.obj[key]
 	if !ok {
-		return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
+		return v, fmt.Errorf("unable to get nonexistent key: %s: %w", key, ErrMissing)
 	}
 	return v, nil
 }
@@ -625,7 +625,7 @@ func (d *partialDoc) remove(key string, options *ApplyOptions) error {
 		if options.AllowMissingPathOnRemove {
 			return nil
 		}
-		return errors.Wrapf(ErrMissing, "unable to remove nonexistent key: %s", key)
+		return fmt.Errorf("unable to remove nonexistent key: %s: %w", key, ErrMissing)
 	}
 	idx := -1
 	for i, k := range d.keys {
@@ -649,10 +649,10 @@ func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) err
 
 	if idx < 0 {
 		if !options.SupportNegativeIndices {
-			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		if idx < -len(d.nodes) {
-			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		idx += len(d.nodes)
 	}
@@ -669,7 +669,7 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
 
 	idx, err := strconv.Atoi(key)
 	if err != nil {
-		return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+		return fmt.Errorf("value was not a proper array index: '%s': %w", key, err)
 	}
 
 	sz := len(d.nodes) + 1
@@ -679,15 +679,15 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
 	cur := d
 
 	if idx >= len(ary) {
-		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+		return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 	}
 
 	if idx < 0 {
 		if !options.SupportNegativeIndices {
-			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		if idx < -len(ary) {
-			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		idx += len(ary)
 	}
@@ -713,16 +713,16 @@ func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error)
 
 	if idx < 0 {
 		if !options.SupportNegativeIndices {
-			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		if idx < -len(d.nodes) {
-			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		idx += len(d.nodes)
 	}
 
 	if idx >= len(d.nodes) {
-		return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+		return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 	}
 
 	return d.nodes[idx], nil
@@ -740,18 +740,18 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
 		if options.AllowMissingPathOnRemove {
 			return nil
 		}
-		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+		return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 	}
 
 	if idx < 0 {
 		if !options.SupportNegativeIndices {
-			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		if idx < -len(cur.nodes) {
 			if options.AllowMissingPathOnRemove {
 				return nil
 			}
-			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+			return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
 		}
 		idx += len(cur.nodes)
 	}
@@ -768,7 +768,7 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
 func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
 	path, err := op.Path()
 	if err != nil {
-		return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+		return fmt.Errorf("add operation failed to decode path: %w", ErrMissing)
 	}
 
 	// special case, adding to empty means replacing the container with the value given
@@ -809,12 +809,12 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
 	con, key := findObject(doc, path, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+		return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
 	}
 
 	err = con.add(key, op.value(), options)
 	if err != nil {
-		return errors.Wrapf(err, "error in add for path: '%s'", path)
+		return fmt.Errorf("error in add for path: '%s': %w", path, err)
 	}
 
 	return nil
@@ -867,11 +867,11 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 				if arrIndex < 0 {
 
 					if !options.SupportNegativeIndices {
-						return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for invalid index: %d", arrIndex)
+						return fmt.Errorf("Unable to ensure path for invalid index: %d: %w", arrIndex, ErrInvalidIndex)
 					}
 
 					if arrIndex < -1 {
-						return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for negative index other than -1: %d", arrIndex)
+						return fmt.Errorf("Unable to ensure path for negative index other than -1: %d: %w", arrIndex, ErrInvalidIndex)
 					}
 
 					arrIndex = 0
@@ -918,11 +918,11 @@ func validateOperation(op Operation) error {
 	switch op.Kind() {
 	case "add", "replace":
 		if _, err := op.ValueInterface(); err != nil {
-			return errors.Wrapf(err, "failed to decode 'value'")
+			return fmt.Errorf("failed to decode 'value': %w", err)
 		}
 	case "move", "copy":
 		if _, err := op.From(); err != nil {
-			return errors.Wrapf(err, "failed to decode 'from'")
+			return fmt.Errorf("failed to decode 'from': %w", err)
 		}
 	case "remove", "test":
 	default:
@@ -930,7 +930,7 @@ func validateOperation(op Operation) error {
 	}
 
 	if _, err := op.Path(); err != nil {
-		return errors.Wrapf(err, "failed to decode 'path'")
+		return fmt.Errorf("failed to decode 'path': %w", err)
 	}
 
 	return nil
@@ -941,10 +941,10 @@ func validatePatch(p Patch) error {
 		if err := validateOperation(op); err != nil {
 			opData, infoErr := json.Marshal(op)
 			if infoErr != nil {
-				return errors.Wrapf(err, "invalid operation")
+				return fmt.Errorf("invalid operation: %w", err)
 			}
 
-			return errors.Wrapf(err, "invalid operation %s", opData)
+			return fmt.Errorf("invalid operation %s: %w", opData, err)
 		}
 	}
 
@@ -954,7 +954,7 @@ func validatePatch(p Patch) error {
 func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error {
 	path, err := op.Path()
 	if err != nil {
-		return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+		return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing)
 	}
 
 	con, key := findObject(doc, path, options)
@@ -963,12 +963,12 @@ func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error
 		if options.AllowMissingPathOnRemove {
 			return nil
 		}
-		return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+		return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
 	}
 
 	err = con.remove(key, options)
 	if err != nil {
-		return errors.Wrapf(err, "error in remove for path: '%s'", path)
+		return fmt.Errorf("error in remove for path: '%s': %w", path, err)
 	}
 
 	return nil
@@ -977,7 +977,7 @@ func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error
 func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) error {
 	path, err := op.Path()
 	if err != nil {
-		return errors.Wrapf(err, "replace operation failed to decode path")
+		return fmt.Errorf("replace operation failed to decode path: %w", err)
 	}
 
 	if path == "" {
@@ -986,7 +986,7 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
 		if val.which == eRaw {
 			if !val.tryDoc() {
 				if !val.tryAry() {
-					return errors.Wrapf(err, "replace operation value must be object or array")
+					return fmt.Errorf("replace operation value must be object or array: %w", err)
 				}
 			} else {
 				val.doc.opts = options
@@ -999,7 +999,7 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
 		case eDoc:
 			*doc = val.doc
 		case eRaw:
-			return errors.Wrapf(err, "replace operation hit impossible case")
+			return fmt.Errorf("replace operation hit impossible case: %w", err)
 		}
 
 		return nil
@@ -1008,17 +1008,17 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
 	con, key := findObject(doc, path, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+		return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing)
 	}
 
 	_, ok := con.get(key, options)
 	if ok != nil {
-		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+		return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing)
 	}
 
 	err = con.set(key, op.value(), options)
 	if err != nil {
-		return errors.Wrapf(err, "error in remove for path: '%s'", path)
+		return fmt.Errorf("error in remove for path: '%s': %w", path, err)
 	}
 
 	return nil
@@ -1027,43 +1027,43 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
 func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error {
 	from, err := op.From()
 	if err != nil {
-		return errors.Wrapf(err, "move operation failed to decode from")
+		return fmt.Errorf("move operation failed to decode from: %w", err)
 	}
 
 	if from == "" {
-		return errors.Wrapf(ErrInvalid, "unable to move entire document to another path")
+		return fmt.Errorf("unable to move entire document to another path: %w", ErrInvalid)
 	}
 
 	con, key := findObject(doc, from, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+		return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing)
 	}
 
 	val, err := con.get(key, options)
 	if err != nil {
-		return errors.Wrapf(err, "error in move for path: '%s'", key)
+		return fmt.Errorf("error in move for path: '%s': %w", key, err)
 	}
 
 	err = con.remove(key, options)
 	if err != nil {
-		return errors.Wrapf(err, "error in move for path: '%s'", key)
+		return fmt.Errorf("error in move for path: '%s': %w", key, err)
 	}
 
 	path, err := op.Path()
 	if err != nil {
-		return errors.Wrapf(err, "move operation failed to decode path")
+		return fmt.Errorf("move operation failed to decode path: %w", err)
 	}
 
 	con, key = findObject(doc, path, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+		return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
 	}
 
 	err = con.add(key, val, options)
 	if err != nil {
-		return errors.Wrapf(err, "error in move for path: '%s'", path)
+		return fmt.Errorf("error in move for path: '%s': %w", path, err)
 	}
 
 	return nil
@@ -1072,7 +1072,7 @@ func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error {
 func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
 	path, err := op.Path()
 	if err != nil {
-		return errors.Wrapf(err, "test operation failed to decode path")
+		return fmt.Errorf("test operation failed to decode path: %w", err)
 	}
 
 	if path == "" {
@@ -1091,18 +1091,18 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
 			return nil
 		}
 
-		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+		return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
 	}
 
 	con, key := findObject(doc, path, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+		return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing)
 	}
 
 	val, err := con.get(key, options)
-	if err != nil && errors.Cause(err) != ErrMissing {
-		return errors.Wrapf(err, "error in test for path: '%s'", path)
+	if err != nil && errors.Unwrap(err) != ErrMissing {
+		return fmt.Errorf("error in test for path: '%s': %w", path, err)
 	}
 
 	ov := op.value()
@@ -1111,49 +1111,49 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
 		if ov.isNull() {
 			return nil
 		}
-		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+		return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
 	} else if ov.isNull() {
-		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+		return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
 	}
 
 	if val.equal(op.value()) {
 		return nil
 	}
 
-	return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+	return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
 }
 
 func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, options *ApplyOptions) error {
 	from, err := op.From()
 	if err != nil {
-		return errors.Wrapf(err, "copy operation failed to decode from")
+		return fmt.Errorf("copy operation failed to decode from: %w", err)
 	}
 
 	con, key := findObject(doc, from, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: \"%s\"", from)
+		return fmt.Errorf("copy operation does not apply: doc is missing from path: \"%s\": %w", from, ErrMissing)
 	}
 
 	val, err := con.get(key, options)
 	if err != nil {
-		return errors.Wrapf(err, "error in copy for from: '%s'", from)
+		return fmt.Errorf("error in copy for from: '%s': %w", from, err)
 	}
 
 	path, err := op.Path()
 	if err != nil {
-		return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+		return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing)
 	}
 
 	con, key = findObject(doc, path, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+		return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
 	}
 
 	valCopy, sz, err := deepCopy(val, options)
 	if err != nil {
-		return errors.Wrapf(err, "error while performing deep copy")
+		return fmt.Errorf("error while performing deep copy: %w", err)
 	}
 
 	(*accumulatedCopySize) += int64(sz)
@@ -1163,7 +1163,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
 
 	err = con.add(key, valCopy, options)
 	if err != nil {
-		return errors.Wrapf(err, "error while adding value during copy")
+		return fmt.Errorf("error while adding value during copy: %w", err)
 	}
 
 	return nil
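
The switch from `github.com/pkg/errors` to stdlib `%w` wrapping keeps sentinel matching working, with one subtlety: `errors.Unwrap` peels a single layer, while the old `errors.Cause` walked the whole chain, so the `test` operation's check relies on `ErrMissing` being wrapped exactly once. A self-contained sketch of the pattern (the sentinel is redeclared here for illustration):

```Go
package main

import (
	"errors"
	"fmt"
)

var ErrMissing = errors.New("missing value")

func get(key string) error {
	// %w wraps exactly one level, as in partialDoc.get above.
	return fmt.Errorf("unable to get nonexistent key: %s: %w", key, ErrMissing)
}

func main() {
	err := get("foo")
	fmt.Println(errors.Unwrap(err) == ErrMissing) // true: one wrap, one unwrap
	fmt.Println(errors.Is(err, ErrMissing))       // true: robust to deeper chains
}
```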
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
index af0a79507e599b3911c538c537ed6c9122e450c5..da9f9e6f09ecb9646a7228612b984a9b41bb16b3 100644
--- a/vendor/github.com/fxamacker/cbor/v2/README.md
+++ b/vendor/github.com/fxamacker/cbor/v2/README.md
@@ -1,6 +1,4 @@
-# CBOR Codec in Go
-
-<!-- [![](https://github.com/fxamacker/images/raw/master/cbor/v2.5.0/fxamacker_cbor_banner.png)](#cbor-library-in-go) -->
+<h1>CBOR Codec <a href="https://pkg.go.dev/github.com/fxamacker/cbor/v2"><img src="https://raw.githubusercontent.com/fxamacker/images/refs/heads/master/cbor/go-logo-blue.svg" alt="Go logo" style="height: 1em;" align="right"></a></h1>
 
 [fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
 
@@ -8,23 +6,26 @@ CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name
 
 `fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX&nbsp;Foundry, Flow Foundation, Fraunhofer&#8209;AISEC, Kubernetes, Let's&nbsp;Encrypt (ISRG), Linux&nbsp;Foundation, Microsoft, Mozilla, Oasis&nbsp;Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
 
-See [Quick&nbsp;Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/).  🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences.  `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
+See [Quick&nbsp;Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/).  🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences.  `MarshalToBuffer` and `UserBufferEncMode` accept a user-specified buffer.
 
 ## fxamacker/cbor
 
 [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
-[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
 [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
 [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage)
 [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor)
+[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
 
 `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD&nbsp;94 (RFC&nbsp;8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC&nbsp;8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC&nbsp;8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
 
 Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
 
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
+
 Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
 
-<details><summary>Highlights</summary><p/>
+<details><summary> 🔎&nbsp; Highlights</summary><p/>
 
 __🚀&nbsp; Speed__
 
@@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022.  No vulnerabili
 
 __🗜️&nbsp; Data Size__
 
-Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) automatically reduce the size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
 
 __:jigsaw:&nbsp; Usability__
 
@@ -58,164 +59,201 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.
 
 `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
 
-By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
-
-<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
-
-```Go
-// Example of encoding/gob having "fatal error: runtime: out of memory"
-// while decoding 181 bytes.
-package main
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/hex"
-	"fmt"
-)
-
-// Example data is from https://github.com/golang/go/issues/24446
-// (shortened to 181 bytes).
-const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
-	"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
-	"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
-	"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
-	"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
-	"303030303030303030303030303030303030303030303030303030303030" +
-	"30"
-
-type X struct {
-	J *X
-	K map[string]int
-}
-
-func main() {
-	raw, _ := hex.DecodeString(data)
-	decoder := gob.NewDecoder(bytes.NewReader(raw))
-
-	var x X
-	decoder.Decode(&x) // fatal error: runtime: out of memory
-	fmt.Println("Decoding finished.")
-}
-```
-
-<hr/>
-
-</details>
-
-`fxamacker/cbor` is fast at rejecting malformed CBOR data.  E.g. attempts to  
-decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
-| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op |  13 allocs/op |
-
-<details><summary>Benchmark details</summary><p/>
-
-Latest comparison used:
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
-- go test -bench=. -benchmem -count=20
-
-#### Prior comparisons
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
-| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op |  2 allocs/op |
-| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op |  12 allocs/op |
-| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
-
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.6, linux/amd64, i5-13600K (DDR4)
-- go test -bench=. -benchmem -count=20
-
-<hr/>
-
-</details>
-
-### Smaller Encodings with Struct Tags
-
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
-
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
-
-https://go.dev/play/p/YxwvfPdFQG2
-
-```Go
-// Example encoding nested struct (with omitempty tag)
-// - encoding/json:  18 byte JSON
-// - fxamacker/cbor:  1 byte CBOR
-package main
-
-import (
-	"encoding/hex"
-	"encoding/json"
-	"fmt"
-
-	"github.com/fxamacker/cbor/v2"
-)
-
-type GrandChild struct {
-	Quux int `json:",omitempty"`
-}
-
-type Child struct {
-	Baz int        `json:",omitempty"`
-	Qux GrandChild `json:",omitempty"`
-}
-
-type Parent struct {
-	Foo Child `json:",omitempty"`
-	Bar int   `json:",omitempty"`
-}
-
-func cb() {
-	results, _ := cbor.Marshal(Parent{})
-	fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
-
-	text, _ := cbor.Diagnose(results) // Diagnostic Notation
-	fmt.Println("DN: " + text)
-}
-
-func js() {
-	results, _ := json.Marshal(Parent{})
-	fmt.Println("hex(JSON): " + hex.EncodeToString(results))
-
-	text := string(results) // JSON
-	fmt.Println("JSON: " + text)
-}
-
-func main() {
-	cb()
-	fmt.Println("-------------")
-	js()
-}
-```
-
-Output (DN is Diagnostic Notation):
-```
-hex(CBOR): a0
-DN: {}
--------------
-hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
-JSON: {"Foo":{"Qux":{}}}
-```
-
-<hr/>
-
-</details>
-
-Example using different struct tags together:
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]  
+> Benchmarks for rejecting 10 bytes of malicious CBOR data when decoding to `[]byte`:
+> 
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op |  13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+> 
+> <details><summary> 🔎&nbsp; Benchmark details </summary><p/>
+> 
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+> 
+> #### Prior comparisons
+> 
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op |  2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op |  12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+> 
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+> 
+> </details>
+
+In contrast, some codecs can crash or use excessive resources while decoding bad data.
+
+> [!WARNING]  
+> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+> 
+> <details><summary> 🔎&nbsp; gob fatal error (out of memory) 💥 decoding 181 bytes</summary><p/>
+>
+> ```Go
+> // Example of encoding/gob having "fatal error: runtime: out of memory"
+> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
+> package main
+> import (
+> 	"bytes"
+> 	"encoding/gob"
+> 	"encoding/hex"
+> 	"fmt"
+> )
+> 
+> // Example data is from https://github.com/golang/go/issues/24446
+> // (shortened to 181 bytes).
+> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+> 	"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+> 	"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+> 	"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+> 	"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+> 	"303030303030303030303030303030303030303030303030303030303030" +
+> 	"30"
+> 
+> type X struct {
+> 	J *X
+> 	K map[string]int
+> }
+> 
+> func main() {
+> 	raw, _ := hex.DecodeString(data)
+> 	decoder := gob.NewDecoder(bytes.NewReader(raw))
+> 
+> 	var x X
+> 	decoder.Decode(&x) // fatal error: runtime: out of memory
+> 	fmt.Println("Decoding finished.")
+> }
+> ```
+>
+>
+> </details>
+
+### Smaller Encodings with Struct Tag Options
+
+Struct tag options automatically reduce the encoded size of structs and improve speed.
+
+We can write less code by using struct tag options:
+- `toarray`: encode without field names (decode back to original struct)
+- `keyasint`: encode field names as integers (decode back to original struct)
+- `omitempty`: omit empty fields when encoding
+- `omitzero`: omit zero-value fields when encoding
 
 ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
 
-API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
+> [!NOTE]  
+>  `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
+> - `encoding/json`:  18 bytes of JSON
+> - `fxamacker/cbor`:  1 byte of CBOR  
+>
+> <details><summary> 🔎&nbsp; Encoding 3-level nested Go struct with omitempty</summary><p/>
+>
+> https://go.dev/play/p/YxwvfPdFQG2
+> 
+> ```Go
+> // Example encoding nested struct (with omitempty tag)
+> // - encoding/json:  18 byte JSON
+> // - fxamacker/cbor:  1 byte CBOR
+> 
+> package main
+> 
+> import (
+> 	"encoding/hex"
+> 	"encoding/json"
+> 	"fmt"
+> 
+> 	"github.com/fxamacker/cbor/v2"
+> )
+> 
+> type GrandChild struct {
+> 	Quux int `json:",omitempty"`
+> }
+> 
+> type Child struct {
+> 	Baz int        `json:",omitempty"`
+> 	Qux GrandChild `json:",omitempty"`
+> }
+> 
+> type Parent struct {
+> 	Foo Child `json:",omitempty"`
+> 	Bar int   `json:",omitempty"`
+> }
+> 
+> func cb() {
+> 	results, _ := cbor.Marshal(Parent{})
+> 	fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+> 
+> 	text, _ := cbor.Diagnose(results) // Diagnostic Notation
+> 	fmt.Println("DN: " + text)
+> }
+> 
+> func js() {
+> 	results, _ := json.Marshal(Parent{})
+> 	fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+> 
+> 	text := string(results) // JSON
+> 	fmt.Println("JSON: " + text)
+> }
+> 
+> func main() {
+> 	cb()
+> 	fmt.Println("-------------")
+> 	js()
+> }
+> ```
+> 
+> Output (DN is Diagnostic Notation):
+> ```
+> hex(CBOR): a0
+> DN: {}
+> -------------
+> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+> JSON: {"Foo":{"Qux":{}}}
+> ```
+> 
+> </details>
+
 
 ## Quick Start
 
 __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
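+
+A minimal round trip using the package-level default modes (a sketch, not part
+of the original Quick Start text):
+
+```go
+b, err := cbor.Marshal(map[string]int{"x": 1}) // encode to CBOR
+if err == nil {
+	var m map[string]int
+	err = cbor.Unmarshal(b, &m) // decode back into m
+}
+```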
 
+> [!TIP]  
+>
+> Tinygo users can try the beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
+>
+> <details><summary> 🔎&nbsp; More about tinygo feature branch</summary>
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and can be compiled with tinygo v0.33 (it also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch is not yet fuzz tested.
+>
+> Changes in this feature branch only affect tinygo-compiled software.  Summary of changes:
+> - the default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32).  Users can specify a higher limit (see the sketch below), but 24+ crashes tests when compiled with tinygo v0.33.
+> - decoding CBOR tag data to a Go interface is disabled because tinygo v0.33 is missing a needed feature.
+> - the encoding error message can differ when encoding a function type.
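+> 
+> A sketch of raising that limit (per the note above, 24+ crashed tests under
+> tinygo v0.33):
+> 
+> ```go
+> dm, err := cbor.DecOptions{MaxNestedLevels: 20}.DecMode()
+> ```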
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+> </details>
+
+
 ### Key Points
 
 This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
@@ -252,16 +290,17 @@ rest, err = cbor.UnmarshalFirst(b, &v)   // decode []byte b to v
 // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
 text, rest, err = cbor.DiagnoseFirst(b)  // decode []byte b to Diagnostic Notation text
 
-// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
-// but new funcs UnmarshalFirst and DiagnoseFirst do not.
+// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
+// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
 ```
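+
+For example, a sketch of draining a whole CBOR Sequence with `UnmarshalFirst`
+(variable names follow the snippet above):
+
+```go
+for len(b) > 0 {
+	var v any
+	b, err = cbor.UnmarshalFirst(b, &v) // decode next item, keep the rest
+	if err != nil {
+		break // handle malformed item
+	}
+	// use v ...
+}
+```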
 
-__IMPORTANT__: 👉  CBOR settings allow trade-offs between speed, security, encoding size, etc.
-
-- Different CBOR libraries may use different default settings.
-- CBOR-based formats or protocols usually require specific settings.
-
-For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
+> [!IMPORTANT]  
+> CBOR settings allow trade-offs between speed, security, encoding size, etc.
+>
+> - Different CBOR libraries may use different default settings.
+> - CBOR-based formats or protocols usually require specific settings.
+>
+> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
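+
+For instance, a minimal sketch using the library's preset constructor for CTAP2:
+
+```go
+em, err := cbor.CTAP2EncOptions().EncMode() // immutable, safe for concurrent use
+if err != nil {
+	// handle error
+}
+b, err := em.Marshal(v) // v encodes per CTAP2 canonical rules
+```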
 
 ### Presets
 
@@ -312,9 +351,9 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
 
 ### Struct Tags
 
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce the encoded size of structs.
 
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
+<details><summary> 🔎&nbsp; Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
 
 https://go.dev/play/p/YxwvfPdFQG2
 
@@ -382,13 +421,13 @@ JSON: {"Foo":{"Qux":{}}}
 
 </details>
 
-<details><summary>Example using several struct tags</summary><p/>
+<details><summary> 🔎&nbsp; Example using struct tag options</summary><p/>
 	
 ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
 
 </details>
 
-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
+Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
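+
+For instance, a sketch of `toarray` on a hypothetical struct (not from this
+README):
+
+```go
+type Point struct {
+	_ struct{} `cbor:",toarray"` // encode fields as CBOR array elements
+	X int
+	Y int
+}
+
+// cbor.Marshal(Point{X: 1, Y: 2}) produces 0x820102, i.e. the CBOR array [1, 2].
+```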
 
 ### CBOR Tags
 
@@ -404,7 +443,7 @@ em, err := opts.EncModeWithSharedTags(ts)  // mutable shared CBOR tags
 
 `TagSet` and modes using it are safe for concurrent use.  Equivalent API is available for `DecMode`.
 
-<details><summary>Example using TagSet and TagOptions</summary><p/>
+<details><summary> 🔎&nbsp; Example using TagSet and TagOptions</summary><p/>
 
 ```go
 // Use signedCWT struct defined in "Decoding CWT" example.
@@ -430,7 +469,7 @@ if err := dm.Unmarshal(data, &v); err != nil {
 em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
 
 // Marshal signedCWT with tag number.
-if data, err := cbor.Marshal(v); err != nil {
+if data, err := em.Marshal(v); err != nil {
 	return err
 }
 ```
@@ -439,7 +478,7 @@ if data, err := cbor.Marshal(v); err != nil {
 
 ### Functions and Interfaces
 
-<details><summary>Functions and interfaces at a glance</summary><p/>
+<details><summary> 🔎&nbsp; Functions and interfaces at a glance</summary><p/>
 
 Common functions with same API as `encoding/json`:  
 - `Marshal`, `Unmarshal`
@@ -472,11 +511,24 @@ Default limits may need to be increased for systems handling very large data (e.
 
 ## Status
 
-v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc.   It passed fuzz tests (5+ billion executions) and is production quality.
+v2.8.0 (March 30, 2025) is a small release that primarily adds the `omitzero` struct field tag option and fixes bugs.  It passed fuzz tests (billions of executions) and is production quality.
+
+v2.8.0 and v2.7.1 fix these 3 functions (when called directly by user apps) to use the same error handling on bad inputs as `cbor.Unmarshal()`:
+- `ByteString.UnmarshalCBOR()`
+- `RawTag.UnmarshalCBOR()`
+- `SimpleValue.UnmarshalCBOR()`
+
+The above 3 `UnmarshalCBOR()` functions were initially created for internal use and are now deprecated, so please use `Unmarshal()` or `UnmarshalFirst()` instead.  To preserve backward compatibility, these deprecated functions were added to fuzz tests and will not be removed in v2.
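+
+A migration sketch, using `ByteString` as an example:
+
+```go
+var bs cbor.ByteString
+// Before (deprecated direct call): err := bs.UnmarshalCBOR(data)
+err := cbor.Unmarshal(data, &bs) // preferred
+```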
+
+The minimum version of Go required to build:
+- v2.8.0 requires go 1.20.
+- v2.7.1 and older releases require go 1.17.
 
 For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
 
-### Prior Release
+### Prior Releases
+
+v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc.  It passed fuzz tests (5+ billion executions) and is production quality.
 
 [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON.  New options and optimizations improve handling of bignum, integers, maps, and strings.
 
@@ -489,7 +541,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0
 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
 
 <!--
-<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
+<details><summary> 🔎&nbsp; Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
 
 TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
 
@@ -549,7 +601,7 @@ geomean                                                      2.782
 
 ## Who uses fxamacker/cbor
 
-`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper&nbsp;Labs, EdgeX&nbsp;Foundry, F5, FIDO Alliance, Fraunhofer&#8209;AISEC, Kubernetes, Let's Encrypt (ISRG), Linux&nbsp;Foundation, Matrix.org, Microsoft, Mozilla, National&nbsp;Cybersecurity&nbsp;Agency&nbsp;of&nbsp;France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
+`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential&nbsp;Computing&nbsp;Consortium, ConsenSys, EdgeX&nbsp;Foundry, F5, Flow&nbsp;Foundation, Fraunhofer&#8209;AISEC, IBM, Kubernetes, Let's&nbsp;Encrypt&nbsp;(ISRG), Linux&nbsp;Foundation, Matrix.org, Microsoft, Mozilla, National&nbsp;Cybersecurity&nbsp;Agency&nbsp;of&nbsp;France&nbsp;(govt), Netherlands&nbsp;(govt), Oasis&nbsp;Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
 
 `fxamacker/cbor` passed multiple confidential security assessments.  A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
 
@@ -588,7 +640,7 @@ By default, decoder treats time values of floating-point NaN and Infinity as if
 __Click to expand topic:__
 
 <details>
- <summary>Duplicate Map Keys</summary><p>
+ <summary> 🔎&nbsp; Duplicate Map Keys</summary><p>
 
 This library provides options for fast detection and rejection of duplicate map keys. Detection applies a Go-specific data model to CBOR's extended generic data model: a CBOR map key counts as a duplicate if it would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
 
@@ -601,7 +653,7 @@ APF suffix means "Allow Partial Fill" so the destination map or struct can conta
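+
+For example, a sketch enabling the strictest of these options:
+
+```go
+dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+// dm.Unmarshal returns DupMapKeyError when it detects a duplicate map key.
+```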
 </details>
 
 <details>
- <summary>Tag Validity</summary><p>
+ <summary> 🔎&nbsp; Tag Validity</summary><p>
 
 This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
 
diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
index 823bff12ce14f7ac6bb3edc55c40de221da5d807..23c5724d2e41d6562a9edc0dd548420799d1c9b4 100644
--- a/vendor/github.com/fxamacker/cbor/v2/bytestring.go
+++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
@@ -38,11 +38,38 @@ func (bs ByteString) MarshalCBOR() ([]byte, error) {
 
 // UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
 // Decoding CBOR null and CBOR undefined sets ByteString to be empty.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
 func (bs *ByteString) UnmarshalCBOR(data []byte) error {
 	if bs == nil {
 		return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
 	}
 
+	d := decoder{data: data, dm: defaultDecMode}
+
+	// Check well-formedness of CBOR data item.
+	// ByteString.UnmarshalCBOR() is exported, so
+	// the codec needs to support same behavior for:
+	// - Unmarshal(data, *ByteString)
+	// - ByteString.UnmarshalCBOR(data)
+	err := d.wellformed(false, false)
+	if err != nil {
+		return err
+	}
+
+	return bs.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
+// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (bs *ByteString) unmarshalCBOR(data []byte) error {
+	if bs == nil {
+		return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
+	}
+
 	// Decoding CBOR null and CBOR undefined to ByteString resets data.
 	// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
 	if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go
index ea0f39e24f0da9411bc281b22091f6b19e0e4831..0d96b9882cc09bf65e1303b3c108afc90d328cfd 100644
--- a/vendor/github.com/fxamacker/cbor/v2/cache.go
+++ b/vendor/github.com/fxamacker/cbor/v2/cache.go
@@ -17,6 +17,7 @@ import (
 type encodeFuncs struct {
 	ef  encodeFunc
 	ief isEmptyFunc
+	izf isZeroFunc
 }
 
 var (
@@ -31,6 +32,7 @@ type specialType int
 const (
 	specialTypeNone specialType = iota
 	specialTypeUnmarshalerIface
+	specialTypeUnexportedUnmarshalerIface
 	specialTypeEmptyIface
 	specialTypeIface
 	specialTypeTag
@@ -50,7 +52,7 @@ type typeInfo struct {
 func newTypeInfo(t reflect.Type) *typeInfo {
 	tInfo := typeInfo{typ: t, kind: t.Kind()}
 
-	for t.Kind() == reflect.Ptr {
+	for t.Kind() == reflect.Pointer {
 		t = t.Elem()
 	}
 
@@ -69,7 +71,9 @@ func newTypeInfo(t reflect.Type) *typeInfo {
 		tInfo.spclType = specialTypeTag
 	} else if t == typeTime {
 		tInfo.spclType = specialTypeTime
-	} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
+	} else if reflect.PointerTo(t).Implements(typeUnexportedUnmarshaler) {
+		tInfo.spclType = specialTypeUnexportedUnmarshalerIface
+	} else if reflect.PointerTo(t).Implements(typeUnmarshaler) {
 		tInfo.spclType = specialTypeUnmarshalerIface
 	}
 
@@ -237,7 +241,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
 	e := getEncodeBuffer()
 	for i := 0; i < len(flds); i++ {
 		// Get field's encodeFunc
-		flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
+		flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
 		if flds[i].ef == nil {
 			err = &UnsupportedTypeError{t}
 			break
@@ -321,7 +325,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
 func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
 	for i := 0; i < len(flds); i++ {
 		// Get field's encodeFunc
-		flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
+		flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
 		if flds[i].ef == nil {
 			structType := &encodingStructType{err: &UnsupportedTypeError{t}}
 			encodingStructTypeCache.Store(t, structType)
@@ -337,14 +341,14 @@ func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructT
 	return structType, structType.err
 }
 
-func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
+func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) {
 	if v, _ := encodeFuncCache.Load(t); v != nil {
 		fs := v.(encodeFuncs)
-		return fs.ef, fs.ief
+		return fs.ef, fs.ief, fs.izf
 	}
-	ef, ief := getEncodeFuncInternal(t)
-	encodeFuncCache.Store(t, encodeFuncs{ef, ief})
-	return ef, ief
+	ef, ief, izf := getEncodeFuncInternal(t)
+	encodeFuncCache.Store(t, encodeFuncs{ef, ief, izf})
+	return ef, ief, izf
 }
 
 func getTypeInfo(t reflect.Type) *typeInfo {
diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go
index 85842ac7367cc93b438dba3fc743d962b035c8cb..3c1c168f381fe12d1ab50cde419e7d7c7636dd36 100644
--- a/vendor/github.com/fxamacker/cbor/v2/decode.go
+++ b/vendor/github.com/fxamacker/cbor/v2/decode.go
@@ -104,7 +104,7 @@ import (
 // if there are any remaining bytes following the first valid CBOR data item.
 // See UnmarshalFirst, if you want to unmarshal only the first
 // CBOR data item without ExtraneousDataError caused by remaining bytes.
-func Unmarshal(data []byte, v interface{}) error {
+func Unmarshal(data []byte, v any) error {
 	return defaultDecMode.Unmarshal(data, v)
 }
 
@@ -114,7 +114,7 @@ func Unmarshal(data []byte, v interface{}) error {
 // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
 //
 // See the documentation for Unmarshal for details.
-func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
+func UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
 	return defaultDecMode.UnmarshalFirst(data, v)
 }
 
@@ -151,6 +151,10 @@ type Unmarshaler interface {
 	UnmarshalCBOR([]byte) error
 }
 
+type unmarshaler interface {
+	unmarshalCBOR([]byte) error
+}
+
 // InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
 type InvalidUnmarshalError struct {
 	s string
@@ -193,7 +197,7 @@ func (e *InvalidMapKeyTypeError) Error() string {
 
 // DupMapKeyError describes detected duplicate map key in CBOR map.
 type DupMapKeyError struct {
-	Key   interface{}
+	Key   any
 	Index int
 }
 
@@ -1130,7 +1134,7 @@ type DecMode interface {
 	// Unmarshal returns an error.
 	//
 	// See the documentation for Unmarshal for details.
-	Unmarshal(data []byte, v interface{}) error
+	Unmarshal(data []byte, v any) error
 
 	// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
 	// using the decoding mode.  Any remaining bytes are returned in rest.
@@ -1138,7 +1142,7 @@ type DecMode interface {
 	// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
 	//
 	// See the documentation for Unmarshal for details.
-	UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error)
+	UnmarshalFirst(data []byte, v any) (rest []byte, err error)
 
 	// Valid checks whether data is a well-formed encoded CBOR data item and
 	// that it complies with configurable restrictions such as MaxNestedLevels,
@@ -1245,7 +1249,7 @@ func (dm *decMode) DecOptions() DecOptions {
 // Unmarshal returns an error.
 //
 // See the documentation for Unmarshal for details.
-func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
+func (dm *decMode) Unmarshal(data []byte, v any) error {
 	d := decoder{data: data, dm: dm}
 
 	// Check well-formedness.
@@ -1265,7 +1269,7 @@ func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
 // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
 //
 // See the documentation for Unmarshal for details.
-func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
+func (dm *decMode) UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
 	d := decoder{data: data, dm: dm}
 
 	// check well-formedness.
@@ -1341,13 +1345,13 @@ type decoder struct {
 // If CBOR data item fails to be decoded into v,
 // error is returned and offset is moved to the next CBOR data item.
 // Precondition: d.data contains at least one well-formed CBOR data item.
-func (d *decoder) value(v interface{}) error {
+func (d *decoder) value(v any) error {
 	// v can't be nil, non-pointer, or nil pointer value.
 	if v == nil {
 		return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
 	}
 	rv := reflect.ValueOf(v)
-	if rv.Kind() != reflect.Ptr {
+	if rv.Kind() != reflect.Pointer {
 		return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
 	} else if rv.IsNil() {
 		return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
@@ -1361,7 +1365,7 @@ func (d *decoder) value(v interface{}) error {
 func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
 
 	// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
-	if d.nextCBORNil() && v.Kind() == reflect.Ptr {
+	if d.nextCBORNil() && v.Kind() == reflect.Pointer {
 		d.skip()
 		v.Set(reflect.Zero(v.Type()))
 		return nil
@@ -1387,7 +1391,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
 				registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
 				if registeredType != nil {
 					if registeredType.Implements(tInfo.nonPtrType) ||
-						reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) {
+						reflect.PointerTo(registeredType).Implements(tInfo.nonPtrType) {
 						v.Set(reflect.New(registeredType))
 						v = v.Elem()
 						tInfo = getTypeInfo(registeredType)
@@ -1399,7 +1403,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
 
 	// Create new value for the pointer v to point to.
 	// At this point, CBOR value is not nil/undefined if v is a pointer.
-	for v.Kind() == reflect.Ptr {
+	for v.Kind() == reflect.Pointer {
 		if v.IsNil() {
 			if !v.CanSet() {
 				d.skip()
@@ -1460,6 +1464,9 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
 
 		case specialTypeUnmarshalerIface:
 			return d.parseToUnmarshaler(v)
+
+		case specialTypeUnexportedUnmarshalerIface:
+			return d.parseToUnexportedUnmarshaler(v)
 		}
 	}
 
@@ -1788,12 +1795,12 @@ func (d *decoder) parseToTime() (time.Time, bool, error) {
 // parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface.
 // It assumes data is well-formed, and does not perform bounds checking.
 func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
-	if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() {
+	if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
 		d.skip()
 		return nil
 	}
 
-	if v.Kind() != reflect.Ptr && v.CanAddr() {
+	if v.Kind() != reflect.Pointer && v.CanAddr() {
 		v = v.Addr()
 	}
 	if u, ok := v.Interface().(Unmarshaler); ok {
@@ -1805,9 +1812,29 @@ func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
 	return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler")
 }
 
+// parseToUnexportedUnmarshaler parses CBOR data to value implementing unmarshaler interface.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parseToUnexportedUnmarshaler(v reflect.Value) error {
+	if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
+		d.skip()
+		return nil
+	}
+
+	if v.Kind() != reflect.Pointer && v.CanAddr() {
+		v = v.Addr()
+	}
+	if u, ok := v.Interface().(unmarshaler); ok {
+		start := d.off
+		d.skip()
+		return u.unmarshalCBOR(d.data[start:d.off])
+	}
+	d.skip()
+	return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.unmarshaler")
+}
+
 // parse parses CBOR data and returns value in default Go type.
 // It assumes data is well-formed, and does not perform bounds checking.
-func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo
+func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyclo
 	// Strip self-described CBOR tag number.
 	if skipSelfDescribedTag {
 		for d.nextCBORType() == cborTypeTag {
@@ -2224,15 +2251,15 @@ func (d *decoder) parseTextString() ([]byte, error) {
 	return b, nil
 }
 
-func (d *decoder) parseArray() ([]interface{}, error) {
+func (d *decoder) parseArray() ([]any, error) {
 	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
 	hasSize := !indefiniteLength
 	count := int(val)
 	if !hasSize {
 		count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
 	}
-	v := make([]interface{}, count)
-	var e interface{}
+	v := make([]any, count)
+	var e any
 	var err, lastErr error
 	for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
 		if e, lastErr = d.parse(true); lastErr != nil {
@@ -2298,12 +2325,12 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error {
 	return err
 }
 
-func (d *decoder) parseMap() (interface{}, error) {
+func (d *decoder) parseMap() (any, error) {
 	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
 	hasSize := !indefiniteLength
 	count := int(val)
-	m := make(map[interface{}]interface{})
-	var k, e interface{}
+	m := make(map[any]any)
+	var k, e any
 	var err, lastErr error
 	keyCount := 0
 	for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
@@ -2380,9 +2407,9 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
 	keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable.
 	var err, lastErr error
 	keyCount := v.Len()
-	var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key.
+	var existingKeys map[any]bool // Store existing map keys, used for detecting duplicate map key.
 	if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
-		existingKeys = make(map[interface{}]bool, keyCount)
+		existingKeys = make(map[any]bool, keyCount)
 		if keyCount > 0 {
 			vKeys := v.MapKeys()
 			for i := 0; i < len(vKeys); i++ {
@@ -2413,7 +2440,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
 			if !isHashableValue(keyValue.Elem()) {
 				var converted bool
 				if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
-					var k interface{}
+					var k any
 					k, converted = convertByteSliceToByteString(keyValue.Elem().Interface())
 					if converted {
 						keyValue.Set(reflect.ValueOf(k))
@@ -2584,7 +2611,7 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n
 
 	// Keeps track of CBOR map keys to detect duplicate map key
 	keyCount := 0
-	var mapKeys map[interface{}]struct{}
+	var mapKeys map[any]struct{}
 
 	errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0
 
@@ -2594,7 +2621,7 @@ MapEntryLoop:
 
 		// If duplicate field detection is enabled and the key at index j did not match any
 		// field, k will hold the map key.
-		var k interface{}
+		var k any
 
 		t := d.nextCBORType()
 		if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) {
@@ -2764,7 +2791,7 @@ MapEntryLoop:
 			// check is never reached.
 			if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
 				if mapKeys == nil {
-					mapKeys = make(map[interface{}]struct{}, 1)
+					mapKeys = make(map[any]struct{}, 1)
 				}
 				mapKeys[k] = struct{}{}
 				newKeyCount := len(mapKeys)
@@ -2969,18 +2996,19 @@ func (d *decoder) nextCBORNil() bool {
 }
 
 var (
-	typeIntf              = reflect.TypeOf([]interface{}(nil)).Elem()
-	typeTime              = reflect.TypeOf(time.Time{})
-	typeBigInt            = reflect.TypeOf(big.Int{})
-	typeUnmarshaler       = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
-	typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
-	typeString            = reflect.TypeOf("")
-	typeByteSlice         = reflect.TypeOf([]byte(nil))
+	typeIntf                  = reflect.TypeOf([]any(nil)).Elem()
+	typeTime                  = reflect.TypeOf(time.Time{})
+	typeBigInt                = reflect.TypeOf(big.Int{})
+	typeUnmarshaler           = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+	typeUnexportedUnmarshaler = reflect.TypeOf((*unmarshaler)(nil)).Elem()
+	typeBinaryUnmarshaler     = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+	typeString                = reflect.TypeOf("")
+	typeByteSlice             = reflect.TypeOf([]byte(nil))
 )
 
 func fillNil(_ cborType, v reflect.Value) error {
 	switch v.Kind() {
-	case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr:
+	case reflect.Slice, reflect.Map, reflect.Interface, reflect.Pointer:
 		v.Set(reflect.Zero(v.Type()))
 		return nil
 	}
@@ -3083,7 +3111,7 @@ func fillFloat(t cborType, val float64, v reflect.Value) error {
 }
 
 func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error {
-	if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) {
+	if bum == BinaryUnmarshalerByteString && reflect.PointerTo(v.Type()).Implements(typeBinaryUnmarshaler) {
 		if v.CanAddr() {
 			v = v.Addr()
 			if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok {
@@ -3172,7 +3200,7 @@ func isHashableValue(rv reflect.Value) bool {
 // This function also handles nested tags.
 // CBOR data is already verified to be well-formed before this function is used,
 // so the recursion won't exceed max nested levels.
-func convertByteSliceToByteString(v interface{}) (interface{}, bool) {
+func convertByteSliceToByteString(v any) (any, bool) {
 	switch v := v.(type) {
 	case []byte:
 		return ByteString(v), true
diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go
index 23f68b984c63b024c1f79d77f0632ea8d8a310a2..c758b7374893feaf1b94669b943e63c1831a1ee4 100644
--- a/vendor/github.com/fxamacker/cbor/v2/doc.go
+++ b/vendor/github.com/fxamacker/cbor/v2/doc.go
@@ -2,15 +2,15 @@
 // Licensed under the MIT License. See LICENSE in the project root for license information.
 
 /*
-Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
-Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
+Package cbor is a modern CBOR codec (RFC 8949 & RFC 8742) with CBOR tags,
+Go struct tag options (toarray/keyasint/omitempty/omitzero), Core Deterministic Encoding,
 CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
 
 Encoding options allow "preferred serialization" by encoding integers and floats
 to their smallest forms (e.g. float16) when values fit.
 
-Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
-and easier to use with structs.
+Struct tag options "keyasint", "toarray", "omitempty", and "omitzero" reduce encoding size
+and reduce programming effort.
 
 For example, "toarray" tag makes struct fields encode to CBOR array elements.  And
 "keyasint" makes a field encode to an element of CBOR map with specified int key.
@@ -23,11 +23,19 @@ The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
 
 Function signatures identical to encoding/json include:
 
-	Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
+	Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode
 
 Standard interfaces include:
 
-	BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
+	BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler
+
+Diagnostic functions translate CBOR data item into Diagnostic Notation:
+
+	Diagnose, DiagnoseFirst
+
+Functions that simplify using CBOR Sequences (RFC 8742) include:
+
+	UnmarshalFirst
 
 Custom encoding and decoding is possible by implementing standard interfaces for
 user-defined Go types.
@@ -50,19 +58,19 @@ Modes are intended to be reused and are safe for concurrent use.
 
 EncMode and DecMode Interfaces
 
-	    // EncMode interface uses immutable options and is safe for concurrent use.
-	    type EncMode interface {
+	// EncMode interface uses immutable options and is safe for concurrent use.
+	type EncMode interface {
 		Marshal(v interface{}) ([]byte, error)
 		NewEncoder(w io.Writer) *Encoder
 		EncOptions() EncOptions  // returns copy of options
-	    }
+	}
 
-	    // DecMode interface uses immutable options and is safe for concurrent use.
-	    type DecMode interface {
+	// DecMode interface uses immutable options and is safe for concurrent use.
+	type DecMode interface {
 		Unmarshal(data []byte, v interface{}) error
 		NewDecoder(r io.Reader) *Decoder
 		DecOptions() DecOptions  // returns copy of options
-	    }
+	}
 
 Using Default Encoding Mode
 
@@ -78,6 +86,16 @@ Using Default Decoding Mode
 	decoder := cbor.NewDecoder(r)
 	err = decoder.Decode(&v)
 
+Using Default Mode of UnmarshalFirst to Decode CBOR Sequences
+
+	// Decode the first CBOR data item and return remaining bytes:
+	rest, err = cbor.UnmarshalFirst(b, &v)   // decode []byte b to v
+
+Using Extended Diagnostic Notation (EDN) to represent CBOR data
+
+	// Translate the first CBOR data item into text and return remaining bytes.
+	text, rest, err = cbor.DiagnoseFirst(b)  // decode []byte b to text
+
 Creating and Using Encoding Modes
 
 	// Create EncOptions using either struct literal or a function.
@@ -111,15 +129,20 @@ Decoding Options: https://github.com/fxamacker/cbor#decoding-options
 Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
 If both struct tags are specified then `cbor` is used.
 
-Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
+Struct tag options like "keyasint", "toarray", "omitempty", and "omitzero" make it easy to use
 very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
 
+The "omitzero" option omits zero values from encoding, matching
+[stdlib encoding/json behavior](https://pkg.go.dev/encoding/json#Marshal).
+When specified in the `cbor` tag, the option is always honored.
+When specified in the `json` tag, the option is honored when building with Go 1.24+.
+
 For example, "toarray" makes struct fields encode to array elements.  And "keyasint"
 makes struct fields encode to elements of CBOR map with int keys.
 
 https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
 
-Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
+Struct tag options are listed at https://github.com/fxamacker/cbor#struct-tags-1
 
 # Tests and Fuzzing
 
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go
index 6508e291d61809c0aff4f2d764dd1212c8b8e79b..a3f999221b8159b77a5526b02dd20801ff5a52e7 100644
--- a/vendor/github.com/fxamacker/cbor/v2/encode.go
+++ b/vendor/github.com/fxamacker/cbor/v2/encode.go
@@ -58,8 +58,10 @@ import (
 //
 // Marshal supports format string stored under the "cbor" key in the struct
 // field's tag.  CBOR format string can specify the name of the field,
-// "omitempty" and "keyasint" options, and special case "-" for field omission.
-// If "cbor" key is absent, Marshal uses "json" key.
+// "omitempty", "omitzero" and "keyasint" options, and special case "-" for
+// field omission. If "cbor" key is absent, Marshal uses "json" key.
+// When using the "json" key, the "omitzero" option is honored when building
+// with Go 1.24+ to match stdlib encoding/json behavior.
 //
 // Struct field name is treated as integer if it has "keyasint" option in
 // its format string.  The format string must specify an integer as its
@@ -67,8 +69,8 @@ import (
 //
 // Special struct field "_" is used to specify struct level options, such as
 // "toarray". "toarray" option enables Go struct to be encoded as CBOR array.
-// "omitempty" is disabled by "toarray" to ensure that the same number
-// of elements are encoded every time.
+// "omitempty" and "omitzero" are disabled by "toarray" to ensure that the
+// same number of elements are encoded every time.
 //
 // Anonymous struct fields are marshaled as if their exported fields
 // were fields in the outer struct.  Marshal follows the same struct fields
@@ -92,7 +94,7 @@ import (
 //
 // Values of other types cannot be encoded in CBOR.  Attempting
 // to encode such a value causes Marshal to return an UnsupportedTypeError.
-func Marshal(v interface{}) ([]byte, error) {
+func Marshal(v any) ([]byte, error) {
 	return defaultEncMode.Marshal(v)
 }
 
@@ -103,7 +105,7 @@ func Marshal(v interface{}) ([]byte, error) {
 // partially encoded data if error is returned.
 //
 // See Marshal for more details.
-func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
+func MarshalToBuffer(v any, buf *bytes.Buffer) error {
 	return defaultEncMode.MarshalToBuffer(v, buf)
 }
 
@@ -773,7 +775,7 @@ func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore
 
 // EncMode is the main interface for CBOR encoding.
 type EncMode interface {
-	Marshal(v interface{}) ([]byte, error)
+	Marshal(v any) ([]byte, error)
 	NewEncoder(w io.Writer) *Encoder
 	EncOptions() EncOptions
 }
@@ -783,7 +785,7 @@ type EncMode interface {
 // into the built-in buffer pool.
 type UserBufferEncMode interface {
 	EncMode
-	MarshalToBuffer(v interface{}, buf *bytes.Buffer) error
+	MarshalToBuffer(v any, buf *bytes.Buffer) error
 
 	// This private method is to prevent users implementing
 	// this interface and so future additions to it will
@@ -921,7 +923,7 @@ func (em *encMode) encTagBytes(t reflect.Type) []byte {
 // Marshal returns the CBOR encoding of v using em encoding mode.
 //
 // See the documentation for Marshal for details.
-func (em *encMode) Marshal(v interface{}) ([]byte, error) {
+func (em *encMode) Marshal(v any) ([]byte, error) {
 	e := getEncodeBuffer()
 
 	if err := encode(e, em, reflect.ValueOf(v)); err != nil {
@@ -943,7 +945,7 @@ func (em *encMode) Marshal(v interface{}) ([]byte, error) {
 // partially encoded data if error is returned.
 //
 // See Marshal for more details.
-func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
+func (em *encMode) MarshalToBuffer(v any, buf *bytes.Buffer) error {
 	if buf == nil {
 		return fmt.Errorf("cbor: encoding buffer provided by user is nil")
 	}
@@ -957,7 +959,7 @@ func (em *encMode) NewEncoder(w io.Writer) *Encoder {
 
 // encodeBufferPool caches unused bytes.Buffer objects for later reuse.
 var encodeBufferPool = sync.Pool{
-	New: func() interface{} {
+	New: func() any {
 		e := new(bytes.Buffer)
 		e.Grow(32) // TODO: make this configurable
 		return e
@@ -975,6 +977,7 @@ func putEncodeBuffer(e *bytes.Buffer) {
 
 type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error
 type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error)
+type isZeroFunc func(v reflect.Value) (zero bool, err error)
 
 func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
 	if !v.IsValid() {
@@ -983,7 +986,7 @@ func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
 		return nil
 	}
 	vt := v.Type()
-	f, _ := getEncodeFunc(vt)
+	f, _, _ := getEncodeFunc(vt)
 	if f == nil {
 		return &UnsupportedTypeError{vt}
 	}
@@ -1483,6 +1486,15 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
 				continue
 			}
 		}
+		if f.omitZero {
+			zero, err := f.izf(fv)
+			if err != nil {
+				return err
+			}
+			if zero {
+				continue
+			}
+		}
 
 		if !f.keyAsInt && em.fieldName == FieldNameToByteString {
 			e.Write(f.cborNameByteString)
@@ -1775,34 +1787,34 @@ var (
 	typeByteString      = reflect.TypeOf(ByteString(""))
 )
 
-func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
+func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc, izf isZeroFunc) {
 	k := t.Kind()
-	if k == reflect.Ptr {
-		return getEncodeIndirectValueFunc(t), isEmptyPtr
+	if k == reflect.Pointer {
+		return getEncodeIndirectValueFunc(t), isEmptyPtr, getIsZeroFunc(t)
 	}
 	switch t {
 	case typeSimpleValue:
-		return encodeMarshalerType, isEmptyUint
+		return encodeMarshalerType, isEmptyUint, getIsZeroFunc(t)
 
 	case typeTag:
-		return encodeTag, alwaysNotEmpty
+		return encodeTag, alwaysNotEmpty, getIsZeroFunc(t)
 
 	case typeTime:
-		return encodeTime, alwaysNotEmpty
+		return encodeTime, alwaysNotEmpty, getIsZeroFunc(t)
 
 	case typeBigInt:
-		return encodeBigInt, alwaysNotEmpty
+		return encodeBigInt, alwaysNotEmpty, getIsZeroFunc(t)
 
 	case typeRawMessage:
-		return encodeMarshalerType, isEmptySlice
+		return encodeMarshalerType, isEmptySlice, getIsZeroFunc(t)
 
 	case typeByteString:
-		return encodeMarshalerType, isEmptyString
+		return encodeMarshalerType, isEmptyString, getIsZeroFunc(t)
 	}
-	if reflect.PtrTo(t).Implements(typeMarshaler) {
-		return encodeMarshalerType, alwaysNotEmpty
+	if reflect.PointerTo(t).Implements(typeMarshaler) {
+		return encodeMarshalerType, alwaysNotEmpty, getIsZeroFunc(t)
 	}
-	if reflect.PtrTo(t).Implements(typeBinaryMarshaler) {
+	if reflect.PointerTo(t).Implements(typeBinaryMarshaler) {
 		defer func() {
 			// capture encoding method used for modes that disable BinaryMarshaler
 			bme := binaryMarshalerEncoder{
@@ -1815,39 +1827,39 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
 	}
 	switch k {
 	case reflect.Bool:
-		return encodeBool, isEmptyBool
+		return encodeBool, isEmptyBool, getIsZeroFunc(t)
 
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return encodeInt, isEmptyInt
+		return encodeInt, isEmptyInt, getIsZeroFunc(t)
 
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		return encodeUint, isEmptyUint
+		return encodeUint, isEmptyUint, getIsZeroFunc(t)
 
 	case reflect.Float32, reflect.Float64:
-		return encodeFloat, isEmptyFloat
+		return encodeFloat, isEmptyFloat, getIsZeroFunc(t)
 
 	case reflect.String:
-		return encodeString, isEmptyString
+		return encodeString, isEmptyString, getIsZeroFunc(t)
 
 	case reflect.Slice:
 		if t.Elem().Kind() == reflect.Uint8 {
-			return encodeByteString, isEmptySlice
+			return encodeByteString, isEmptySlice, getIsZeroFunc(t)
 		}
 		fallthrough
 
 	case reflect.Array:
-		f, _ := getEncodeFunc(t.Elem())
+		f, _, _ := getEncodeFunc(t.Elem())
 		if f == nil {
-			return nil, nil
+			return nil, nil, nil
 		}
-		return arrayEncodeFunc{f: f}.encode, isEmptySlice
+		return arrayEncodeFunc{f: f}.encode, isEmptySlice, getIsZeroFunc(t)
 
 	case reflect.Map:
 		f := getEncodeMapFunc(t)
 		if f == nil {
-			return nil, nil
+			return nil, nil, nil
 		}
-		return f, isEmptyMap
+		return f, isEmptyMap, getIsZeroFunc(t)
 
 	case reflect.Struct:
 		// Get struct's special field "_" tag options
@@ -1855,31 +1867,31 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
 			tag := f.Tag.Get("cbor")
 			if tag != "-" {
 				if hasToArrayOption(tag) {
-					return encodeStructToArray, isEmptyStruct
+					return encodeStructToArray, isEmptyStruct, isZeroFieldStruct
 				}
 			}
 		}
-		return encodeStruct, isEmptyStruct
+		return encodeStruct, isEmptyStruct, getIsZeroFunc(t)
 
 	case reflect.Interface:
-		return encodeIntf, isEmptyIntf
+		return encodeIntf, isEmptyIntf, getIsZeroFunc(t)
 	}
-	return nil, nil
+	return nil, nil, nil
 }
 
 func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc {
-	for t.Kind() == reflect.Ptr {
+	for t.Kind() == reflect.Pointer {
 		t = t.Elem()
 	}
-	f, _ := getEncodeFunc(t)
+	f, _, _ := getEncodeFunc(t)
 	if f == nil {
 		return nil
 	}
 	return func(e *bytes.Buffer, em *encMode, v reflect.Value) error {
-		for v.Kind() == reflect.Ptr && !v.IsNil() {
+		for v.Kind() == reflect.Pointer && !v.IsNil() {
 			v = v.Elem()
 		}
-		if v.Kind() == reflect.Ptr && v.IsNil() {
+		if v.Kind() == reflect.Pointer && v.IsNil() {
 			e.Write(cborNil)
 			return nil
 		}
@@ -1987,3 +1999,96 @@ func float32NaNFromReflectValue(v reflect.Value) float32 {
 	f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32)
 	return f32
 }
+
+type isZeroer interface {
+	IsZero() bool
+}
+
+var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
+
+// getIsZeroFunc returns a function for the given type that can be called to determine if a given value is zero.
+// Types that implement `IsZero() bool` are delegated to for non-nil values.
+// Types that do not implement `IsZero() bool` use the reflect.Value#IsZero() implementation.
+// The returned function matches behavior of stdlib encoding/json behavior in Go 1.24+.
+func getIsZeroFunc(t reflect.Type) isZeroFunc {
+	// Provide a function that uses a type's IsZero method if defined.
+	switch {
+	case t == nil:
+		return isZeroDefault
+	case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
+		return isZeroInterfaceCustom
+	case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
+		return isZeroPointerCustom
+	case t.Implements(isZeroerType):
+		return isZeroCustom
+	case reflect.PointerTo(t).Implements(isZeroerType):
+		return isZeroAddrCustom
+	default:
+		return isZeroDefault
+	}
+}
+
+// isZeroInterfaceCustom returns true for nil or pointer-to-nil values,
+// and delegates to the custom IsZero() implementation otherwise.
+func isZeroInterfaceCustom(v reflect.Value) (bool, error) {
+	kind := v.Kind()
+
+	switch kind {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Interface, reflect.Slice:
+		if v.IsNil() {
+			return true, nil
+		}
+	}
+
+	switch kind {
+	case reflect.Interface, reflect.Pointer:
+		if elem := v.Elem(); elem.Kind() == reflect.Pointer && elem.IsNil() {
+			return true, nil
+		}
+	}
+
+	return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroPointerCustom returns true for nil values,
+// and delegates to the custom IsZero() implementation otherwise.
+func isZeroPointerCustom(v reflect.Value) (bool, error) {
+	if v.IsNil() {
+		return true, nil
+	}
+	return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroCustom delegates to the custom IsZero() implementation.
+func isZeroCustom(v reflect.Value) (bool, error) {
+	return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroAddrCustom delegates to the custom IsZero() implementation of the addr of the value.
+func isZeroAddrCustom(v reflect.Value) (bool, error) {
+	if !v.CanAddr() {
+		// Temporarily box v so we can take the address.
+		v2 := reflect.New(v.Type()).Elem()
+		v2.Set(v)
+		v = v2
+	}
+	return v.Addr().Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroDefault calls reflect.Value#IsZero()
+func isZeroDefault(v reflect.Value) (bool, error) {
+	if !v.IsValid() {
+		// v is zero value
+		return true, nil
+	}
+	return v.IsZero(), nil
+}
+
+// isZeroFieldStruct is used to determine whether to omit toarray structs
+func isZeroFieldStruct(v reflect.Value) (bool, error) {
+	structType, err := getEncodingStructType(v.Type())
+	if err != nil {
+		return false, err
+	}
+	return len(structType.fields) == 0, nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
index 8b4b4bbc59fb20a5e5837b4ddba2de29d38f360e..2871bfdab95daef5ca7cfbcdc0191b9b81451014 100644
--- a/vendor/github.com/fxamacker/cbor/v2/encode_map.go
+++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
@@ -1,8 +1,6 @@
 // Copyright (c) Faye Amacker. All rights reserved.
 // Licensed under the MIT License. See LICENSE in the project root for license information.
 
-//go:build go1.20
-
 package cbor
 
 import (
@@ -67,8 +65,8 @@ func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v
 }
 
 func getEncodeMapFunc(t reflect.Type) encodeFunc {
-	kf, _ := getEncodeFunc(t.Key())
-	ef, _ := getEncodeFunc(t.Elem())
+	kf, _, _ := getEncodeFunc(t.Key())
+	ef, _, _ := getEncodeFunc(t.Elem())
 	if kf == nil || ef == nil {
 		return nil
 	}
@@ -76,13 +74,13 @@ func getEncodeMapFunc(t reflect.Type) encodeFunc {
 		kf: kf,
 		ef: ef,
 		kpool: sync.Pool{
-			New: func() interface{} {
+			New: func() any {
 				rk := reflect.New(t.Key()).Elem()
 				return &rk
 			},
 		},
 		vpool: sync.Pool{
-			New: func() interface{} {
+			New: func() any {
 				rv := reflect.New(t.Elem()).Elem()
 				return &rv
 			},
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
deleted file mode 100644
index 31c39336ddfc8239e822b399d585ffb2670f52b3..0000000000000000000000000000000000000000
--- a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) Faye Amacker. All rights reserved.
-// Licensed under the MIT License. See LICENSE in the project root for license information.
-
-//go:build !go1.20
-
-package cbor
-
-import (
-	"bytes"
-	"reflect"
-)
-
-type mapKeyValueEncodeFunc struct {
-	kf, ef encodeFunc
-}
-
-func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
-	if kvs == nil {
-		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
-			if err := me.kf(e, em, iter.Key()); err != nil {
-				return err
-			}
-			if err := me.ef(e, em, iter.Value()); err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-
-	initial := e.Len()
-	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
-		offset := e.Len()
-		if err := me.kf(e, em, iter.Key()); err != nil {
-			return err
-		}
-		valueOffset := e.Len()
-		if err := me.ef(e, em, iter.Value()); err != nil {
-			return err
-		}
-		kvs[i] = keyValue{
-			offset:      offset - initial,
-			valueOffset: valueOffset - initial,
-			nextOffset:  e.Len() - initial,
-		}
-	}
-
-	return nil
-}
-
-func getEncodeMapFunc(t reflect.Type) encodeFunc {
-	kf, _ := getEncodeFunc(t.Key())
-	ef, _ := getEncodeFunc(t.Elem())
-	if kf == nil || ef == nil {
-		return nil
-	}
-	mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
-	return mapEncodeFunc{
-		e: mkv.encodeKeyValues,
-	}.encode
-}
diff --git a/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go b/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
new file mode 100644
index 0000000000000000000000000000000000000000..c893a411da670fc90e93cb6fc5ad49040dc21aeb
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
@@ -0,0 +1,8 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build go1.24
+
+package cbor
+
+var jsonStdlibSupportsOmitzero = true
diff --git a/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go b/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
new file mode 100644
index 0000000000000000000000000000000000000000..db86a63217387e21e20beb8027d3d2a5f1688559
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
@@ -0,0 +1,8 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build !go1.24
+
+package cbor
+
+var jsonStdlibSupportsOmitzero = false
diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
index de175cee4add0af5194c1cf8f6431261fbab2305..30f72814f60d8b020c532cf3d6432a0afa6f5c3c 100644
--- a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
+++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
@@ -1,3 +1,6 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
 package cbor
 
 import (
@@ -45,6 +48,9 @@ func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
 }
 
 // UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
 func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
 	if sv == nil {
 		return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
@@ -52,6 +58,29 @@ func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
 
 	d := decoder{data: data, dm: defaultDecMode}
 
+	// Check well-formedness of CBOR data item.
+	// SimpleValue.UnmarshalCBOR() is exported, so
+	// the codec needs to support same behavior for:
+	// - Unmarshal(data, *SimpleValue)
+	// - SimpleValue.UnmarshalCBOR(data)
+	err := d.wellformed(false, false)
+	if err != nil {
+		return err
+	}
+
+	return sv.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (sv *SimpleValue) unmarshalCBOR(data []byte) error {
+	if sv == nil {
+		return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+	}
+
+	d := decoder{data: data, dm: defaultDecMode}
+
 	typ, ai, val := d.getHead()
 
 	if typ != cborTypePrimitives {
diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go
index 507ab6c184948a94d87ce5a07659e8d7b67fe59d..7ac6d7d671295e10f68eb815a670faffb024848c 100644
--- a/vendor/github.com/fxamacker/cbor/v2/stream.go
+++ b/vendor/github.com/fxamacker/cbor/v2/stream.go
@@ -26,7 +26,7 @@ func NewDecoder(r io.Reader) *Decoder {
 }
 
 // Decode reads CBOR value and decodes it into the value pointed to by v.
-func (dec *Decoder) Decode(v interface{}) error {
+func (dec *Decoder) Decode(v any) error {
 	_, err := dec.readNext()
 	if err != nil {
 		// Return validation error or read error.
@@ -170,7 +170,7 @@ func NewEncoder(w io.Writer) *Encoder {
 }
 
 // Encode writes the CBOR encoding of v.
-func (enc *Encoder) Encode(v interface{}) error {
+func (enc *Encoder) Encode(v any) error {
 	if len(enc.indefTypes) > 0 && v != nil {
 		indefType := enc.indefTypes[len(enc.indefTypes)-1]
 		if indefType == cborTypeTextString {
diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go
index 81228acf0fdceb5cae4aaf2761bcccf1cbd2816f..593508d9ceb70968f3a548a37e2b1fcfd57c966d 100644
--- a/vendor/github.com/fxamacker/cbor/v2/structfields.go
+++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go
@@ -18,9 +18,11 @@ type field struct {
 	typ                reflect.Type
 	ef                 encodeFunc
 	ief                isEmptyFunc
+	izf                isZeroFunc
 	typInfo            *typeInfo // used to decoder to reuse type info
 	tagged             bool      // used to choose dominant field (at the same level tagged fields dominate untagged fields)
 	omitEmpty          bool      // used to skip empty field
+	omitZero           bool      // used to skip zero field
 	keyAsInt           bool      // used to encode/decode field name as int
 }
 
@@ -157,7 +159,7 @@ func appendFields(
 		f := t.Field(i)
 
 		ft := f.Type
-		for ft.Kind() == reflect.Ptr {
+		for ft.Kind() == reflect.Pointer {
 			ft = ft.Elem()
 		}
 
@@ -165,9 +167,11 @@ func appendFields(
 			continue
 		}
 
+		cborTag := true
 		tag := f.Tag.Get("cbor")
 		if tag == "" {
 			tag = f.Tag.Get("json")
+			cborTag = false
 		}
 		if tag == "-" {
 			continue
@@ -177,7 +181,7 @@ func appendFields(
 
 		// Parse field tag options
 		var tagFieldName string
-		var omitempty, keyasint bool
+		var omitempty, omitzero, keyasint bool
 		for j := 0; tag != ""; j++ {
 			var token string
 			idx := strings.IndexByte(tag, ',')
@@ -192,6 +196,10 @@ func appendFields(
 				switch token {
 				case "omitempty":
 					omitempty = true
+				case "omitzero":
+					if cborTag || jsonStdlibSupportsOmitzero {
+						omitzero = true
+					}
 				case "keyasint":
 					keyasint = true
 				}
@@ -213,6 +221,7 @@ func appendFields(
 				idx:       fIdx,
 				typ:       f.Type,
 				omitEmpty: omitempty,
+				omitZero:  omitzero,
 				keyAsInt:  keyasint,
 				tagged:    tagged})
 		} else {
@@ -244,7 +253,7 @@ func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv r
 		fv = fv.Field(n)
 
 		if i < len(idx)-1 {
-			if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
+			if fv.Kind() == reflect.Pointer && fv.Type().Elem().Kind() == reflect.Struct {
 				if fv.IsNil() {
 					// Null pointer to embedded struct field
 					fv, err = f(fv)
diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go
index 5c4d2b7a42fc5866e89c423266b52fe5425f5760..47bcca801815719dadbec2567871231f740600a5 100644
--- a/vendor/github.com/fxamacker/cbor/v2/tag.go
+++ b/vendor/github.com/fxamacker/cbor/v2/tag.go
@@ -1,3 +1,6 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
 package cbor
 
 import (
@@ -12,7 +15,7 @@ import (
 // enclosed data item if it were to appear outside of a tag.
 type Tag struct {
 	Number  uint64
-	Content interface{}
+	Content any
 }
 
 // RawTag represents CBOR tag data, including tag number and raw tag content.
@@ -23,11 +26,37 @@ type RawTag struct {
 }
 
 // UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
 func (t *RawTag) UnmarshalCBOR(data []byte) error {
 	if t == nil {
 		return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
 	}
 
+	d := decoder{data: data, dm: defaultDecMode}
+
+	// Check if data is a well-formed CBOR data item.
+	// RawTag.UnmarshalCBOR() is exported, so
+	// the codec needs to support same behavior for:
+	// - Unmarshal(data, *RawTag)
+	// - RawTag.UnmarshalCBOR(data)
+	err := d.wellformed(false, false)
+	if err != nil {
+		return err
+	}
+
+	return t.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR sets *t with tag number and raw tag content copied from data.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (t *RawTag) unmarshalCBOR(data []byte) error {
+	if t == nil {
+		return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
+	}
+
 	// Decoding CBOR null and undefined to cbor.RawTag is no-op.
 	if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
 		return nil
@@ -193,7 +222,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
 	if contentType == nil {
 		return errors.New("cbor: cannot add nil content type to TagSet")
 	}
-	for contentType.Kind() == reflect.Ptr {
+	for contentType.Kind() == reflect.Pointer {
 		contentType = contentType.Elem()
 	}
 	tag, err := newTagItem(opts, contentType, num, nestedNum...)
@@ -216,7 +245,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
 
 // Remove removes given tag content type from TagSet.
 func (t *syncTagSet) Remove(contentType reflect.Type) {
-	for contentType.Kind() == reflect.Ptr {
+	for contentType.Kind() == reflect.Pointer {
 		contentType = contentType.Elem()
 	}
 	t.Lock()
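
The deprecated-but-supported RawTag path above can still be exercised directly; a sketch, assuming the same cbor module:

	package main

	import (
		"fmt"

		"github.com/fxamacker/cbor/v2"
	)

	func main() {
		// 0xc1 is CBOR tag number 1 (epoch time); its content here is the integer 0.
		data := []byte{0xc1, 0x00}

		var rt cbor.RawTag
		// Unmarshal still populates RawTag; calling rt.UnmarshalCBOR(data)
		// directly also works and now checks well-formedness first.
		if err := cbor.Unmarshal(data, &rt); err != nil {
			panic(err)
		}
		fmt.Println(rt.Number)          // 1
		fmt.Printf("%% x\n", rt.Content) // 00
	}
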
diff --git a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md
index 1a9a27bcf6e5c6362f4774e49085969c93fbced9..8f349c4b8f5ebb466f0b5e35501fafa295d319da 100644
--- a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md
+++ b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md
@@ -1,48 +1,82 @@
 # Changelog
+
 All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.6.0] - 2023-01-30
+
+[0.6.0]: https://github.com/go-logfmt/logfmt/compare/v0.5.1...v0.6.0
+
+### Added
+
+- NewDecoderSize by [@alexanderjophus]
+
+## [0.5.1] - 2021-08-18
+
+[0.5.1]: https://github.com/go-logfmt/logfmt/compare/v0.5.0...v0.5.1
+
+### Changed
+
+- Update the `go.mod` file for Go 1.17 as described in the [Go 1.17 release
+  notes](https://golang.org/doc/go1.17#go-command)
+
 ## [0.5.0] - 2020-01-03
 
+[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0
+
 ### Changed
+
 - Remove the dependency on github.com/kr/logfmt by [@ChrisHines]
 - Move fuzz code to github.com/go-logfmt/fuzzlogfmt by [@ChrisHines]
 
 ## [0.4.0] - 2018-11-21
 
+[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0
+
 ### Added
+
 - Go module support by [@ChrisHines]
 - CHANGELOG by [@ChrisHines]
 
 ### Changed
+
 - Drop invalid runes from keys instead of returning ErrInvalidKey by [@ChrisHines]
 - On panic while printing, attempt to print panic value by [@bboreham]
 
 ## [0.3.0] - 2016-11-15
+
+[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0
+
 ### Added
+
 - Pool buffers for quoted strings and byte slices by [@nussjustin]
+
 ### Fixed
+
 - Fuzz fix, quote invalid UTF-8 values by [@judwhite]
 
 ## [0.2.0] - 2016-05-08
+
+[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0
+
 ### Added
+
 - Encoder.EncodeKeyvals by [@ChrisHines]
 
 ## [0.1.0] - 2016-03-28
+
+[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0
+
 ### Added
+
 - Encoder by [@ChrisHines]
 - Decoder by [@ChrisHines]
 - MarshalKeyvals by [@ChrisHines]
 
-[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0
-[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0
-[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0
-[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0
-[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0
-
 [@ChrisHines]: https://github.com/ChrisHines
 [@bboreham]: https://github.com/bboreham
 [@judwhite]: https://github.com/judwhite
 [@nussjustin]: https://github.com/nussjustin
+[@alexanderjophus]: https://github.com/alexanderjophus
diff --git a/vendor/github.com/go-logfmt/logfmt/README.md b/vendor/github.com/go-logfmt/logfmt/README.md
index 8e48fcd3ab709cb0841bc0bb25570e672aac3801..71c57944e23f884f75db49fdb244deff8bc529fe 100644
--- a/vendor/github.com/go-logfmt/logfmt/README.md
+++ b/vendor/github.com/go-logfmt/logfmt/README.md
@@ -1,20 +1,25 @@
+# logfmt
+
 [![Go Reference](https://pkg.go.dev/badge/github.com/go-logfmt/logfmt.svg)](https://pkg.go.dev/github.com/go-logfmt/logfmt)
 [![Go Report Card](https://goreportcard.com/badge/go-logfmt/logfmt)](https://goreportcard.com/report/go-logfmt/logfmt)
 [![Github Actions](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml/badge.svg)](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml)
-[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=master)
-
-# logfmt
+[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=main)](https://coveralls.io/github/go-logfmt/logfmt?branch=main)
 
 Package logfmt implements utilities to marshal and unmarshal data in the [logfmt
-format](https://brandur.org/logfmt). It provides an API similar to
-[encoding/json](http://golang.org/pkg/encoding/json/) and
-[encoding/xml](http://golang.org/pkg/encoding/xml/).
+format][fmt]. It provides an API similar to [encoding/json][json] and
+[encoding/xml][xml].
+
+[fmt]: https://brandur.org/logfmt
+[json]: https://pkg.go.dev/encoding/json
+[xml]: https://pkg.go.dev/encoding/xml
 
 The logfmt format was first documented by Brandur Leach in [this
-article](https://brandur.org/logfmt). The format has not been formally
-standardized. The most authoritative public specification to date has been the
-documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt)
-written by Blake Mizerany and Keith Rarick.
+article][origin]. The format has not been formally standardized. The most
+authoritative public specification to date has been the documentation of a Go
+Language [package][parser] written by Blake Mizerany and Keith Rarick.
+
+[origin]: https://brandur.org/logfmt
+[parser]: https://pkg.go.dev/github.com/kr/logfmt
 
 ## Goals
 
@@ -30,4 +35,7 @@ standard as a goal.
 
 ## Versioning
 
-Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'.
+This project publishes releases according to the Go language guidelines for
+[developing and publishing modules][pub].
+
+[pub]: https://go.dev/doc/modules/developing
diff --git a/vendor/github.com/go-logfmt/logfmt/decode.go b/vendor/github.com/go-logfmt/logfmt/decode.go
index 2013708e4857f63b2a543f140fee0e5c97d17848..a1c22dcbda95b727b3a4848967803a623adda9cd 100644
--- a/vendor/github.com/go-logfmt/logfmt/decode.go
+++ b/vendor/github.com/go-logfmt/logfmt/decode.go
@@ -29,6 +29,23 @@ func NewDecoder(r io.Reader) *Decoder {
 	return dec
 }
 
+// NewDecoderSize returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read data from r beyond
+// the logfmt records requested.
+// The size argument specifies the size of the initial buffer that the
+// Decoder will use to read records from r.
+// If a log line is longer than the size argument, the Decoder will return
+// a bufio.ErrTooLong error.
+func NewDecoderSize(r io.Reader, size int) *Decoder {
+	scanner := bufio.NewScanner(r)
+	scanner.Buffer(make([]byte, 0, size), size)
+	dec := &Decoder{
+		s: scanner,
+	}
+	return dec
+}
+
 // ScanRecord advances the Decoder to the next record, which can then be
 // parsed with the ScanKeyval method. It returns false when decoding stops,
 // either by reaching the end of the input or an error. After ScanRecord
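
A short usage sketch for the NewDecoderSize API added above (the input string is chosen arbitrarily):

	package main

	import (
		"bufio"
		"errors"
		"fmt"
		"strings"

		"github.com/go-logfmt/logfmt"
	)

	func main() {
		in := strings.NewReader("msg=hello level=info\n")
		// Cap each record at 64 KiB; longer lines surface bufio.ErrTooLong.
		dec := logfmt.NewDecoderSize(in, 64*1024)
		for dec.ScanRecord() {
			for dec.ScanKeyval() {
				fmt.Printf("%s=%s\n", dec.Key(), dec.Value())
			}
		}
		if err := dec.Err(); errors.Is(err, bufio.ErrTooLong) {
			fmt.Println("record exceeded buffer size:", err)
		}
	}
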
diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
index 22f8d21cca19b5b67ae5b5bb9d717b81bc8a6fab..d2fafb8a2bb07b8bdc3691498829cf20774225af 100644
--- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
+++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -1,12 +1,6 @@
 linters-settings:
-  govet:
-    check-shadowing: true
-  golint:
-    min-confidence: 0
   gocyclo:
     min-complexity: 45
-  maligned:
-    suggest-new: true
   dupl:
     threshold: 200
   goconst:
@@ -16,7 +10,7 @@ linters-settings:
 linters:
   enable-all: true
   disable:
-    - maligned
+    - recvcheck
     - unparam
     - lll
     - gochecknoinits
@@ -29,9 +23,6 @@ linters:
     - wrapcheck
     - testpackage
     - nlreturn
-    - gomnd
-    - exhaustivestruct
-    - goerr113
     - errorlint
     - nestif
     - godot
@@ -39,7 +30,6 @@ linters:
     - paralleltest
     - tparallel
     - thelper
-    - ifshort
     - exhaustruct
     - varnamelen
     - gci
@@ -52,10 +42,15 @@ linters:
     - forcetypeassert
     - cyclop
     # deprecated linters
-    - deadcode
-    - interfacer
-    - scopelint
-    - varcheck
-    - structcheck
-    - golint
-    - nosnakecase
+    #- deadcode
+    #- interfacer
+    #- scopelint
+    #- varcheck
+    #- structcheck
+    #- golint
+    #- nosnakecase
+    #- maligned
+    #- goerr113
+    #- ifshort
+    #- gomnd
+    #- exhaustivestruct
diff --git a/vendor/github.com/go-openapi/jsonpointer/errors.go b/vendor/github.com/go-openapi/jsonpointer/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..b84343d9d74e289b64eb4c8b272f1ea376d4779a
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/errors.go
@@ -0,0 +1,18 @@
+package jsonpointer
+
+type pointerError string
+
+func (e pointerError) Error() string {
+	return string(e)
+}
+
+const (
+	// ErrPointer is an error raised by the jsonpointer package
+	ErrPointer pointerError = "JSON pointer error"
+
+	// ErrInvalidStart states that a JSON pointer must start with a separator ("/")
+	ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator
+
+	// ErrUnsupportedValueType indicates that a value of the wrong type is being set
+	ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values"
+)
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index d970c7cf448e5c327861421797adcf552bb29494..a08cd68ac04e11ccbd59be733c2720c0154e484b 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -39,9 +39,6 @@ import (
 const (
 	emptyPointer     = ``
 	pointerSeparator = `/`
-
-	invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
-	notFound     = `Can't find the pointer in the document`
 )
 
 var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@@ -80,7 +77,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
 
 	if jsonPointerString != emptyPointer {
 		if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
-			err = errors.New(invalidStart)
+			err = errors.Join(ErrInvalidStart, ErrPointer)
 		} else {
 			referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
 			p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
@@ -128,7 +125,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
 	rValue := reflect.Indirect(reflect.ValueOf(node))
 	kind := rValue.Kind()
 	if isNil(node) {
-		return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
+		return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer)
 	}
 
 	switch typed := node.(type) {
@@ -146,7 +143,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
 	case reflect.Struct:
 		nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
 		if !ok {
-			return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
+			return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
 		}
 		fld := rValue.FieldByName(nm)
 		return fld.Interface(), kind, nil
@@ -158,7 +155,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
 		if mv.IsValid() {
 			return mv.Interface(), kind, nil
 		}
-		return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
+		return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
 
 	case reflect.Slice:
 		tokenIndex, err := strconv.Atoi(decodedToken)
@@ -167,14 +164,14 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
 		}
 		sLength := rValue.Len()
 		if tokenIndex < 0 || tokenIndex >= sLength {
-			return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
+			return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer)
 		}
 
 		elem := rValue.Index(tokenIndex)
 		return elem.Interface(), kind, nil
 
 	default:
-		return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
+		return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
 	}
 
 }
@@ -194,7 +191,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
 	case reflect.Struct:
 		nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
 		if !ok {
-			return fmt.Errorf("object has no field %q", decodedToken)
+			return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
 		}
 		fld := rValue.FieldByName(nm)
 		if fld.IsValid() {
@@ -214,18 +211,18 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
 		}
 		sLength := rValue.Len()
 		if tokenIndex < 0 || tokenIndex >= sLength {
-			return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+			return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
 		}
 
 		elem := rValue.Index(tokenIndex)
 		if !elem.CanSet() {
-			return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
+			return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer)
 		}
 		elem.Set(reflect.ValueOf(data))
 		return nil
 
 	default:
-		return fmt.Errorf("invalid token reference %q", decodedToken)
+		return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
 	}
 
 }
@@ -244,7 +241,6 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K
 	}
 
 	for _, token := range p.referenceTokens {
-
 		decodedToken := Unescape(token)
 
 		r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
@@ -264,7 +260,10 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
 	knd := reflect.ValueOf(node).Kind()
 
 	if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
-		return errors.New("only structs, pointers, maps and slices are supported for setting values")
+		return errors.Join(
+			ErrUnsupportedValueType,
+			ErrPointer,
+		)
 	}
 
 	if nameProvider == nil {
@@ -307,7 +306,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
 		case reflect.Struct:
 			nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
 			if !ok {
-				return fmt.Errorf("object has no field %q", decodedToken)
+				return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
 			}
 			fld := rValue.FieldByName(nm)
 			if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
@@ -321,7 +320,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
 			mv := rValue.MapIndex(kv)
 
 			if !mv.IsValid() {
-				return fmt.Errorf("object has no key %q", decodedToken)
+				return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
 			}
 			if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
 				node = mv.Addr().Interface()
@@ -336,7 +335,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
 			}
 			sLength := rValue.Len()
 			if tokenIndex < 0 || tokenIndex >= sLength {
-				return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+				return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
 			}
 
 			elem := rValue.Index(tokenIndex)
@@ -347,7 +346,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
 			node = elem.Interface()
 
 		default:
-			return fmt.Errorf("invalid token reference %q", decodedToken)
+			return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
 		}
 
 	}
@@ -404,10 +403,10 @@ func (p *Pointer) Offset(document string) (int64, error) {
 					return 0, err
 				}
 			default:
-				return 0, fmt.Errorf("invalid token %#v", tk)
+				return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
 			}
 		default:
-			return 0, fmt.Errorf("invalid token %#v", tk)
+			return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
 		}
 	}
 	return offset, nil
@@ -437,16 +436,16 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
 				return offset, nil
 			}
 		default:
-			return 0, fmt.Errorf("invalid token %#v", tk)
+			return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
 		}
 	}
-	return 0, fmt.Errorf("token reference %q not found", decodedToken)
+	return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
 }
 
 func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
 	idx, err := strconv.Atoi(decodedToken)
 	if err != nil {
-		return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
+		return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer)
 	}
 	var i int
 	for i = 0; i < idx && dec.More(); i++ {
@@ -470,7 +469,7 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
 	}
 
 	if !dec.More() {
-		return 0, fmt.Errorf("token reference %q not found", decodedToken)
+		return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
 	}
 	return dec.InputOffset(), nil
 }
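
Since every failure path above now wraps ErrPointer, callers can switch from matching error strings to errors.Is; a sketch:

	package main

	import (
		"errors"
		"fmt"

		"github.com/go-openapi/jsonpointer"
	)

	func main() {
		doc := map[string]any{"a": 1}

		p, err := jsonpointer.New("/missing")
		if err != nil {
			panic(err)
		}
		// The lookup fails, and the returned error wraps jsonpointer.ErrPointer.
		if _, _, err := p.Get(doc); errors.Is(err, jsonpointer.ErrPointer) {
			fmt.Println("pointer lookup failed:", err)
		}
	}
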
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
index 80e2be0042f1cb2809ad86696f2d687d0bf11c26..d2fafb8a2bb07b8bdc3691498829cf20774225af 100644
--- a/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -1,22 +1,17 @@
 linters-settings:
-  govet:
-    check-shadowing: true
-  golint:
-    min-confidence: 0
   gocyclo:
     min-complexity: 45
-  maligned:
-    suggest-new: true
   dupl:
     threshold: 200
   goconst:
-    min-len: 3
+    min-len: 2
     min-occurrences: 3
 
 linters:
   enable-all: true
   disable:
-    - maligned
+    - recvcheck
+    - unparam
     - lll
     - gochecknoinits
     - gochecknoglobals
@@ -28,9 +23,6 @@ linters:
     - wrapcheck
     - testpackage
     - nlreturn
-    - gomnd
-    - exhaustivestruct
-    - goerr113
     - errorlint
     - nestif
     - godot
@@ -38,7 +30,6 @@ linters:
     - paralleltest
     - tparallel
     - thelper
-    - ifshort
     - exhaustruct
     - varnamelen
     - gci
@@ -51,10 +42,15 @@ linters:
     - forcetypeassert
     - cyclop
     # deprecated linters
-    - deadcode
-    - interfacer
-    - scopelint
-    - varcheck
-    - structcheck
-    - golint
-    - nosnakecase
+    #- deadcode
+    #- interfacer
+    #- scopelint
+    #- varcheck
+    #- structcheck
+    #- golint
+    #- nosnakecase
+    #- maligned
+    #- goerr113
+    #- ifshort
+    #- gomnd
+    #- exhaustivestruct
diff --git a/vendor/github.com/go-openapi/swag/errors.go b/vendor/github.com/go-openapi/swag/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..6c67fbf92e355a07b1b99f526f97bd5d1bfc3b26
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/errors.go
@@ -0,0 +1,15 @@
+package swag
+
+type swagError string
+
+const (
+	// ErrYAML is an error raised by YAML utilities
+	ErrYAML swagError = "yaml error"
+
+	// ErrLoader is an error raised by the file loader utility
+	ErrLoader swagError = "loader error"
+)
+
+func (e swagError) Error() string {
+	return string(e)
+}
diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go
index 7e9902ca314bb2055bc0bbbc09b2e18e956558c1..c7caa9908fea97a3889da43ee7f1f9a287ab647a 100644
--- a/vendor/github.com/go-openapi/swag/json.go
+++ b/vendor/github.com/go-openapi/swag/json.go
@@ -126,7 +126,8 @@ func ConcatJSON(blobs ...[]byte) []byte {
 			continue // don't know how to concatenate non container objects
 		}
 
-		if len(b) < 3 { // yep empty but also the last one, so closing this thing
+		const minLengthIfNotEmpty = 3
+		if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing
 			if i == last && a > 0 {
 				if err := buf.WriteByte(closing); err != nil {
 					log.Println(err)
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
index 783442fddf63b88bba8109ed2e5f0452562f010c..658a24b789b5c8c3f524c4e2fd41660af1900a5b 100644
--- a/vendor/github.com/go-openapi/swag/loading.go
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -168,7 +168,7 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
 		}
 
 		if resp.StatusCode != http.StatusOK {
-			return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
+			return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader)
 		}
 
 		return io.ReadAll(resp.Body)
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
index f59e0259320c3745b34380636ae42fd69eac09a7..575346539ac813b6bd14190945c2173123bec213 100644
--- a/vendor/github.com/go-openapi/swag/yaml.go
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -16,7 +16,6 @@ package swag
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"path/filepath"
 	"reflect"
@@ -51,7 +50,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
 		return nil, err
 	}
 	if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
-		return nil, errors.New("only YAML documents that are objects are supported")
+		return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML)
 	}
 	return &document, nil
 }
@@ -69,31 +68,32 @@ func yamlNode(root *yaml.Node) (interface{}, error) {
 	case yaml.AliasNode:
 		return yamlNode(root.Alias)
 	default:
-		return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind)
+		return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML)
 	}
 }
 
 func yamlDocument(node *yaml.Node) (interface{}, error) {
 	if len(node.Content) != 1 {
-		return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content))
+		return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML)
 	}
 	return yamlNode(node.Content[0])
 }
 
 func yamlMapping(node *yaml.Node) (interface{}, error) {
-	m := make(JSONMapSlice, len(node.Content)/2)
+	const sensibleAllocDivider = 2
+	m := make(JSONMapSlice, len(node.Content)/sensibleAllocDivider)
 
 	var j int
 	for i := 0; i < len(node.Content); i += 2 {
 		var nmi JSONMapItem
 		k, err := yamlStringScalarC(node.Content[i])
 		if err != nil {
-			return nil, fmt.Errorf("unable to decode YAML map key: %w", err)
+			return nil, fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML)
 		}
 		nmi.Key = k
 		v, err := yamlNode(node.Content[i+1])
 		if err != nil {
-			return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err)
+			return nil, fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML)
 		}
 		nmi.Value = v
 		m[j] = nmi
@@ -109,7 +109,7 @@ func yamlSequence(node *yaml.Node) (interface{}, error) {
 
 		v, err := yamlNode(node.Content[i])
 		if err != nil {
-			return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err)
+			return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML)
 		}
 		s = append(s, v)
 	}
@@ -132,19 +132,19 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
 	case yamlBoolScalar:
 		b, err := strconv.ParseBool(node.Value)
 		if err != nil {
-			return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err)
+			return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w: %w", node.Value, err, ErrYAML)
 		}
 		return b, nil
 	case yamlIntScalar:
 		i, err := strconv.ParseInt(node.Value, 10, 64)
 		if err != nil {
-			return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err)
+			return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML)
 		}
 		return i, nil
 	case yamlFloatScalar:
 		f, err := strconv.ParseFloat(node.Value, 64)
 		if err != nil {
-			return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err)
+			return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w: %w", node.Value, err, ErrYAML)
 		}
 		return f, nil
 	case yamlTimestamp:
@@ -152,19 +152,19 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
 	case yamlNull:
 		return nil, nil //nolint:nilnil
 	default:
-		return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
+		return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML)
 	}
 }
 
 func yamlStringScalarC(node *yaml.Node) (string, error) {
 	if node.Kind != yaml.ScalarNode {
-		return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind)
+		return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML)
 	}
 	switch node.LongTag() {
 	case yamlStringScalar, yamlIntScalar, yamlFloatScalar:
 		return node.Value, nil
 	default:
-		return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag())
+		return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML)
 	}
 }
 
@@ -349,7 +349,7 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
 			Value: strconv.FormatBool(val),
 		}, nil
 	default:
-		return nil, fmt.Errorf("unhandled type: %T", val)
+		return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML)
 	}
 }
 
@@ -416,7 +416,7 @@ func transformData(input interface{}) (out interface{}, err error) {
 		case int64:
 			return strconv.FormatInt(k, 10), nil
 		default:
-			return "", fmt.Errorf("unexpected map key type, got: %T", k)
+			return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML)
 		}
 	}
 
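
The swag package gets the same treatment via ErrYAML and ErrLoader; a sketch of sentinel-based handling (the input is deliberately not a YAML mapping):

	package main

	import (
		"errors"
		"fmt"

		"github.com/go-openapi/swag"
	)

	func main() {
		// A bare scalar is not an object, so BytesToYAMLDoc should fail
		// with an error wrapping swag.ErrYAML.
		if _, err := swag.BytesToYAMLDoc([]byte("42")); errors.Is(err, swag.ErrYAML) {
			fmt.Println("not a YAML object:", err)
		}
	}
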
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
deleted file mode 100644
index fdff3fdb4cba38e7646967f4ff4f828601abd39a..0000000000000000000000000000000000000000
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/golang/protobuf/proto"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
-
-	anypb "github.com/golang/protobuf/ptypes/any"
-)
-
-const urlPrefix = "type.googleapis.com/"
-
-// AnyMessageName returns the message name contained in an anypb.Any message.
-// Most type assertions should use the Is function instead.
-//
-// Deprecated: Call the any.MessageName method instead.
-func AnyMessageName(any *anypb.Any) (string, error) {
-	name, err := anyMessageName(any)
-	return string(name), err
-}
-func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
-	if any == nil {
-		return "", fmt.Errorf("message is nil")
-	}
-	name := protoreflect.FullName(any.TypeUrl)
-	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
-		name = name[i+len("/"):]
-	}
-	if !name.IsValid() {
-		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
-	}
-	return name, nil
-}
-
-// MarshalAny marshals the given message m into an anypb.Any message.
-//
-// Deprecated: Call the anypb.New function instead.
-func MarshalAny(m proto.Message) (*anypb.Any, error) {
-	switch dm := m.(type) {
-	case DynamicAny:
-		m = dm.Message
-	case *DynamicAny:
-		if dm == nil {
-			return nil, proto.ErrNil
-		}
-		m = dm.Message
-	}
-	b, err := proto.Marshal(m)
-	if err != nil {
-		return nil, err
-	}
-	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
-}
-
-// Empty returns a new message of the type specified in an anypb.Any message.
-// It returns protoregistry.NotFound if the corresponding message type could not
-// be resolved in the global registry.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
-// to resolve the message name and create a new instance of it.
-func Empty(any *anypb.Any) (proto.Message, error) {
-	name, err := anyMessageName(any)
-	if err != nil {
-		return nil, err
-	}
-	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
-	if err != nil {
-		return nil, err
-	}
-	return proto.MessageV1(mt.New().Interface()), nil
-}
-
-// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
-// into the provided message m. It returns an error if the target message
-// does not match the type in the Any message or if an unmarshal error occurs.
-//
-// The target message m may be a *DynamicAny message. If the underlying message
-// type could not be resolved, then this returns protoregistry.NotFound.
-//
-// Deprecated: Call the any.UnmarshalTo method instead.
-func UnmarshalAny(any *anypb.Any, m proto.Message) error {
-	if dm, ok := m.(*DynamicAny); ok {
-		if dm.Message == nil {
-			var err error
-			dm.Message, err = Empty(any)
-			if err != nil {
-				return err
-			}
-		}
-		m = dm.Message
-	}
-
-	anyName, err := AnyMessageName(any)
-	if err != nil {
-		return err
-	}
-	msgName := proto.MessageName(m)
-	if anyName != msgName {
-		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
-	}
-	return proto.Unmarshal(any.Value, m)
-}
-
-// Is reports whether the Any message contains a message of the specified type.
-//
-// Deprecated: Call the any.MessageIs method instead.
-func Is(any *anypb.Any, m proto.Message) bool {
-	if any == nil || m == nil {
-		return false
-	}
-	name := proto.MessageName(m)
-	if !strings.HasSuffix(any.TypeUrl, name) {
-		return false
-	}
-	return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in an anypb.Any message.
-// The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-//	var x ptypes.DynamicAny
-//	if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-//	fmt.Printf("unmarshaled message: %v", x.Message)
-//
-// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
-// the any message contents into a new instance of the underlying message.
-type DynamicAny struct{ proto.Message }
-
-func (m DynamicAny) String() string {
-	if m.Message == nil {
-		return "<nil>"
-	}
-	return m.Message.String()
-}
-func (m DynamicAny) Reset() {
-	if m.Message == nil {
-		return
-	}
-	m.Message.Reset()
-}
-func (m DynamicAny) ProtoMessage() {
-	return
-}
-func (m DynamicAny) ProtoReflect() protoreflect.Message {
-	if m.Message == nil {
-		return nil
-	}
-	return dynamicAny{proto.MessageReflect(m.Message)}
-}
-
-type dynamicAny struct{ protoreflect.Message }
-
-func (m dynamicAny) Type() protoreflect.MessageType {
-	return dynamicAnyType{m.Message.Type()}
-}
-func (m dynamicAny) New() protoreflect.Message {
-	return dynamicAnyType{m.Message.Type()}.New()
-}
-func (m dynamicAny) Interface() protoreflect.ProtoMessage {
-	return DynamicAny{proto.MessageV1(m.Message.Interface())}
-}
-
-type dynamicAnyType struct{ protoreflect.MessageType }
-
-func (t dynamicAnyType) New() protoreflect.Message {
-	return dynamicAny{t.MessageType.New()}
-}
-func (t dynamicAnyType) Zero() protoreflect.Message {
-	return dynamicAny{t.MessageType.Zero()}
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
deleted file mode 100644
index 0ef27d33deb9b04ed6cd5b8fd7bd2018150f90bd..0000000000000000000000000000000000000000
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/any/any.proto
-
-package any
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	anypb "google.golang.org/protobuf/types/known/anypb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/any.proto.
-
-type Any = anypb.Any
-
-var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
-	0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
-	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
-	0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
-	0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
-func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
-	file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
deleted file mode 100644
index d3c33259d28d95dc0772803c7a89c6133c63c72c..0000000000000000000000000000000000000000
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ptypes provides functionality for interacting with well-known types.
-//
-// Deprecated: Well-known types have specialized functionality directly
-// injected into the generated packages for each message type.
-// See the deprecation notice for each function for the suggested alternative.
-package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
deleted file mode 100644
index b2b55dd851f5d6de0a16ec7c3b43bbe2555cdb7c..0000000000000000000000000000000000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	durationpb "github.com/golang/protobuf/ptypes/duration"
-)
-
-// Range of google.protobuf.Duration as specified in duration.proto.
-// This is about 10,000 years in seconds.
-const (
-	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
-	minSeconds = -maxSeconds
-)
-
-// Duration converts a durationpb.Duration to a time.Duration.
-// Duration returns an error if dur is invalid or overflows a time.Duration.
-//
-// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
-func Duration(dur *durationpb.Duration) (time.Duration, error) {
-	if err := validateDuration(dur); err != nil {
-		return 0, err
-	}
-	d := time.Duration(dur.Seconds) * time.Second
-	if int64(d/time.Second) != dur.Seconds {
-		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
-	}
-	if dur.Nanos != 0 {
-		d += time.Duration(dur.Nanos) * time.Nanosecond
-		if (d < 0) != (dur.Nanos < 0) {
-			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
-		}
-	}
-	return d, nil
-}
-
-// DurationProto converts a time.Duration to a durationpb.Duration.
-//
-// Deprecated: Call the durationpb.New function instead.
-func DurationProto(d time.Duration) *durationpb.Duration {
-	nanos := d.Nanoseconds()
-	secs := nanos / 1e9
-	nanos -= secs * 1e9
-	return &durationpb.Duration{
-		Seconds: int64(secs),
-		Nanos:   int32(nanos),
-	}
-}
-
-// validateDuration determines whether the durationpb.Duration is valid
-// according to the definition in google/protobuf/duration.proto.
-// A valid durpb.Duration may still be too large to fit into a time.Duration
-// Note that the range of durationpb.Duration is about 10,000 years,
-// while the range of time.Duration is about 290 years.
-func validateDuration(dur *durationpb.Duration) error {
-	if dur == nil {
-		return errors.New("duration: nil Duration")
-	}
-	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
-		return fmt.Errorf("duration: %v: seconds out of range", dur)
-	}
-	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
-		return fmt.Errorf("duration: %v: nanos out of range", dur)
-	}
-	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
-	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
-		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
-	}
-	return nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
deleted file mode 100644
index d0079ee3ef37ee265345d6391ccbc72db9feb285..0000000000000000000000000000000000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
-
-package duration
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	durationpb "google.golang.org/protobuf/types/known/durationpb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/duration.proto.
-
-type Duration = durationpb.Duration
-
-var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
-	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
-	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
-func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
deleted file mode 100644
index 8368a3f70d383262c41321af736849234da34a83..0000000000000000000000000000000000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-// Range of google.protobuf.Duration as specified in timestamp.proto.
-const (
-	// Seconds field of the earliest valid Timestamp.
-	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	minValidSeconds = -62135596800
-	// Seconds field just after the latest valid Timestamp.
-	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
-	ts, err := TimestampProto(time.Now())
-	if err != nil {
-		panic("ptypes: time.Now() out of Timestamp range")
-	}
-	return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
-	ts := &timestamppb.Timestamp{
-		Seconds: t.Unix(),
-		Nanos:   int32(t.Nanosecond()),
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string {
-	t, err := Timestamp(ts)
-	if err != nil {
-		return fmt.Sprintf("(%v)", err)
-	}
-	return t.Format(time.RFC3339Nano)
-}
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
-// and has a Nanos field in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes the problem.
-//
-// Every valid Timestamp can be represented by a time.Time,
-// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
deleted file mode 100644
index a76f80760094b974777f2aa94c4787e06bc84573..0000000000000000000000000000000000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
-
-package timestamp
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/timestamp.proto.
-
-type Timestamp = timestamppb.Timestamp
-
-var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
-	0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
-	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
-	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
-	0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
-	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
-func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
-	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
index cea12879a0eae937f6ecdb6243f64591c5217fef..fd191f78c7f8725956d6da198c657c812334cccd 100644
--- a/vendor/github.com/golang/snappy/README
+++ b/vendor/github.com/golang/snappy/README
@@ -1,8 +1,13 @@
 The Snappy compression format in the Go programming language.
 
-To download and install from source:
+To use as a library:
 $ go get github.com/golang/snappy
 
+To use as a binary:
+$ go install github.com/golang/snappy/cmd/snappytool@latest
+$ cat decoded | ~/go/bin/snappytool -e > encoded
+$ cat encoded | ~/go/bin/snappytool -d > decoded
+
 Unless otherwise noted, the Snappy-Go source files are distributed
 under the BSD-style license found in the LICENSE file.
 
diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s
index f8d54adfc5c1db9628a677ae5d9cd036ea6865ac..f0c876a248469054771fdc31f312faf43978127d 100644
--- a/vendor/github.com/golang/snappy/encode_arm64.s
+++ b/vendor/github.com/golang/snappy/encode_arm64.s
@@ -27,7 +27,7 @@
 // The unusual register allocation of local variables, such as R10 for the
 // source pointer, matches the allocation used at the call site in encodeBlock,
 // which makes it easier to manually inline this function.
-TEXT ·emitLiteral(SB), NOSPLIT, $32-56
+TEXT ·emitLiteral(SB), NOSPLIT, $40-56
 	MOVD dst_base+0(FP), R8
 	MOVD lit_base+24(FP), R10
 	MOVD lit_len+32(FP), R3
@@ -261,7 +261,7 @@ extendMatchEnd:
 // "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
 // extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
 // local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
-TEXT ·encodeBlock(SB), 0, $32896-56
+TEXT ·encodeBlock(SB), 0, $32904-56
 	MOVD dst_base+0(FP), R8
 	MOVD src_base+24(FP), R7
 	MOVD src_len+32(FP), R14
diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go
index 250c81e8c854d6fe084df57f730fb860e5ca2a6a..16ae66faa3cb2d97a60cdeebe6aac29b1fae6dc3 100644
--- a/vendor/github.com/google/gnostic-models/compiler/extensions.go
+++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go
@@ -20,8 +20,8 @@ import (
 	"os/exec"
 	"strings"
 
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes/any"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/anypb"
 	yaml "gopkg.in/yaml.v3"
 
 	extensions "github.com/google/gnostic-models/extensions"
@@ -33,7 +33,7 @@ type ExtensionHandler struct {
 }
 
 // CallExtension calls a binary extension handler.
-func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
+func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) {
 	if context == nil || context.ExtensionHandlers == nil {
 		return false, nil, nil
 	}
@@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl
 	return handled, response, err
 }
 
-func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
+func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) {
 	if extensionHandlers.Name != "" {
 		yamlData, _ := yaml.Marshal(in)
 		request := &extensions.ExtensionHandlerRequest{
diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
index a71df8abecc6e191070c82c0be2669f4aa3264f5..16c40d985fd1b66c72c3857ba019de897fc23944 100644
--- a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
+++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
@@ -14,8 +14,8 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.27.1
-// 	protoc        v3.19.3
+// 	protoc-gen-go v1.35.1
+// 	protoc        v4.23.4
 // source: extensions/extension.proto
 
 package gnostic_extension_v1
@@ -51,11 +51,9 @@ type Version struct {
 
 func (x *Version) Reset() {
 	*x = Version{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_extensions_extension_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_extensions_extension_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Version) String() string {
@@ -66,7 +64,7 @@ func (*Version) ProtoMessage() {}
 
 func (x *Version) ProtoReflect() protoreflect.Message {
 	mi := &file_extensions_extension_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -123,11 +121,9 @@ type ExtensionHandlerRequest struct {
 
 func (x *ExtensionHandlerRequest) Reset() {
 	*x = ExtensionHandlerRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_extensions_extension_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_extensions_extension_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExtensionHandlerRequest) String() string {
@@ -138,7 +134,7 @@ func (*ExtensionHandlerRequest) ProtoMessage() {}
 
 func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_extensions_extension_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -191,11 +187,9 @@ type ExtensionHandlerResponse struct {
 
 func (x *ExtensionHandlerResponse) Reset() {
 	*x = ExtensionHandlerResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_extensions_extension_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_extensions_extension_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExtensionHandlerResponse) String() string {
@@ -206,7 +200,7 @@ func (*ExtensionHandlerResponse) ProtoMessage() {}
 
 func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_extensions_extension_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -257,11 +251,9 @@ type Wrapper struct {
 
 func (x *Wrapper) Reset() {
 	*x = Wrapper{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_extensions_extension_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_extensions_extension_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Wrapper) String() string {
@@ -272,7 +264,7 @@ func (*Wrapper) ProtoMessage() {}
 
 func (x *Wrapper) ProtoReflect() protoreflect.Message {
 	mi := &file_extensions_extension_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -367,7 +359,7 @@ func file_extensions_extension_proto_rawDescGZIP() []byte {
 }
 
 var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_extensions_extension_proto_goTypes = []interface{}{
+var file_extensions_extension_proto_goTypes = []any{
 	(*Version)(nil),                  // 0: gnostic.extension.v1.Version
 	(*ExtensionHandlerRequest)(nil),  // 1: gnostic.extension.v1.ExtensionHandlerRequest
 	(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
@@ -390,56 +382,6 @@ func file_extensions_extension_proto_init() {
 	if File_extensions_extension_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Version); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExtensionHandlerRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExtensionHandlerResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Wrapper); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/github.com/google/gnostic-models/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go
index ec8afd009239ee5125e27f131df0ade165fcb30c..0768163e5ae212c803cc3d388bcbf4d6b4e943a5 100644
--- a/vendor/github.com/google/gnostic-models/extensions/extensions.go
+++ b/vendor/github.com/google/gnostic-models/extensions/extensions.go
@@ -19,8 +19,8 @@ import (
 	"log"
 	"os"
 
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/anypb"
 )
 
 type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
@@ -54,7 +54,7 @@ func Main(handler extensionHandler) {
 		response.Errors = append(response.Errors, err.Error())
 	} else if handled {
 		response.Handled = true
-		response.Value, err = ptypes.MarshalAny(output)
+		response.Value, err = anypb.New(output)
 		if err != nil {
 			response.Errors = append(response.Errors, err.Error())
 		}
diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
index 65c4c913ce702ea9048f85bcf972de9b89f97d61..3b930b3de2a8a30d8e5fc96f4e9cc258f8e4bf3c 100644
--- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
@@ -16,8 +16,8 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.27.1
-// 	protoc        v3.19.3
+// 	protoc-gen-go v1.35.1
+// 	protoc        v4.23.4
 // source: openapiv2/OpenAPIv2.proto
 
 package openapi_v2
@@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*AdditionalPropertiesItem_Schema
 	//	*AdditionalPropertiesItem_Boolean
 	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
@@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct {
 
 func (x *AdditionalPropertiesItem) Reset() {
 	*x = AdditionalPropertiesItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *AdditionalPropertiesItem) String() string {
@@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {}
 
 func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -128,11 +127,9 @@ type Any struct {
 
 func (x *Any) Reset() {
 	*x = Any{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Any) String() string {
@@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {}
 
 func (x *Any) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -186,11 +183,9 @@ type ApiKeySecurity struct {
 
 func (x *ApiKeySecurity) Reset() {
 	*x = ApiKeySecurity{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ApiKeySecurity) String() string {
@@ -201,7 +196,7 @@ func (*ApiKeySecurity) ProtoMessage() {}
 
 func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -263,11 +258,9 @@ type BasicAuthenticationSecurity struct {
 
 func (x *BasicAuthenticationSecurity) Reset() {
 	*x = BasicAuthenticationSecurity{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *BasicAuthenticationSecurity) String() string {
@@ -278,7 +271,7 @@ func (*BasicAuthenticationSecurity) ProtoMessage() {}
 
 func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -333,11 +326,9 @@ type BodyParameter struct {
 
 func (x *BodyParameter) Reset() {
 	*x = BodyParameter{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *BodyParameter) String() string {
@@ -348,7 +339,7 @@ func (*BodyParameter) ProtoMessage() {}
 
 func (x *BodyParameter) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -422,11 +413,9 @@ type Contact struct {
 
 func (x *Contact) Reset() {
 	*x = Contact{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Contact) String() string {
@@ -437,7 +426,7 @@ func (*Contact) ProtoMessage() {}
 
 func (x *Contact) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -490,11 +479,9 @@ type Default struct {
 
 func (x *Default) Reset() {
 	*x = Default{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Default) String() string {
@@ -505,7 +492,7 @@ func (*Default) ProtoMessage() {}
 
 func (x *Default) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -538,11 +525,9 @@ type Definitions struct {
 
 func (x *Definitions) Reset() {
 	*x = Definitions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Definitions) String() string {
@@ -553,7 +538,7 @@ func (*Definitions) ProtoMessage() {}
 
 func (x *Definitions) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -606,11 +591,9 @@ type Document struct {
 
 func (x *Document) Reset() {
 	*x = Document{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Document) String() string {
@@ -621,7 +604,7 @@ func (*Document) ProtoMessage() {}
 
 func (x *Document) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -758,11 +741,9 @@ type Examples struct {
 
 func (x *Examples) Reset() {
 	*x = Examples{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Examples) String() string {
@@ -773,7 +754,7 @@ func (*Examples) ProtoMessage() {}
 
 func (x *Examples) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -808,11 +789,9 @@ type ExternalDocs struct {
 
 func (x *ExternalDocs) Reset() {
 	*x = ExternalDocs{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExternalDocs) String() string {
@@ -823,7 +802,7 @@ func (*ExternalDocs) ProtoMessage() {}
 
 func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -879,11 +858,9 @@ type FileSchema struct {
 
 func (x *FileSchema) Reset() {
 	*x = FileSchema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FileSchema) String() string {
@@ -894,7 +871,7 @@ func (*FileSchema) ProtoMessage() {}
 
 func (x *FileSchema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1016,11 +993,9 @@ type FormDataParameterSubSchema struct {
 
 func (x *FormDataParameterSubSchema) Reset() {
 	*x = FormDataParameterSubSchema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FormDataParameterSubSchema) String() string {
@@ -1031,7 +1006,7 @@ func (*FormDataParameterSubSchema) ProtoMessage() {}
 
 func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1235,11 +1210,9 @@ type Header struct {
 
 func (x *Header) Reset() {
 	*x = Header{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Header) String() string {
@@ -1250,7 +1223,7 @@ func (*Header) ProtoMessage() {}
 
 func (x *Header) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1433,11 +1406,9 @@ type HeaderParameterSubSchema struct {
 
 func (x *HeaderParameterSubSchema) Reset() {
 	*x = HeaderParameterSubSchema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *HeaderParameterSubSchema) String() string {
@@ -1448,7 +1419,7 @@ func (*HeaderParameterSubSchema) ProtoMessage() {}
 
 func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1627,11 +1598,9 @@ type Headers struct {
 
 func (x *Headers) Reset() {
 	*x = Headers{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Headers) String() string {
@@ -1642,7 +1611,7 @@ func (*Headers) ProtoMessage() {}
 
 func (x *Headers) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1685,11 +1654,9 @@ type Info struct {
 
 func (x *Info) Reset() {
 	*x = Info{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Info) String() string {
@@ -1700,7 +1667,7 @@ func (*Info) ProtoMessage() {}
 
 func (x *Info) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1774,11 +1741,9 @@ type ItemsItem struct {
 
 func (x *ItemsItem) Reset() {
 	*x = ItemsItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ItemsItem) String() string {
@@ -1789,7 +1754,7 @@ func (*ItemsItem) ProtoMessage() {}
 
 func (x *ItemsItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1822,11 +1787,9 @@ type JsonReference struct {
 
 func (x *JsonReference) Reset() {
 	*x = JsonReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *JsonReference) String() string {
@@ -1837,7 +1800,7 @@ func (*JsonReference) ProtoMessage() {}
 
 func (x *JsonReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1880,11 +1843,9 @@ type License struct {
 
 func (x *License) Reset() {
 	*x = License{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *License) String() string {
@@ -1895,7 +1856,7 @@ func (*License) ProtoMessage() {}
 
 func (x *License) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1945,11 +1906,9 @@ type NamedAny struct {
 
 func (x *NamedAny) Reset() {
 	*x = NamedAny{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedAny) String() string {
@@ -1960,7 +1919,7 @@ func (*NamedAny) ProtoMessage() {}
 
 func (x *NamedAny) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2003,11 +1962,9 @@ type NamedHeader struct {
 
 func (x *NamedHeader) Reset() {
 	*x = NamedHeader{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedHeader) String() string {
@@ -2018,7 +1975,7 @@ func (*NamedHeader) ProtoMessage() {}
 
 func (x *NamedHeader) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2061,11 +2018,9 @@ type NamedParameter struct {
 
 func (x *NamedParameter) Reset() {
 	*x = NamedParameter{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedParameter) String() string {
@@ -2076,7 +2031,7 @@ func (*NamedParameter) ProtoMessage() {}
 
 func (x *NamedParameter) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2119,11 +2074,9 @@ type NamedPathItem struct {
 
 func (x *NamedPathItem) Reset() {
 	*x = NamedPathItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedPathItem) String() string {
@@ -2134,7 +2087,7 @@ func (*NamedPathItem) ProtoMessage() {}
 
 func (x *NamedPathItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2177,11 +2130,9 @@ type NamedResponse struct {
 
 func (x *NamedResponse) Reset() {
 	*x = NamedResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedResponse) String() string {
@@ -2192,7 +2143,7 @@ func (*NamedResponse) ProtoMessage() {}
 
 func (x *NamedResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2235,11 +2186,9 @@ type NamedResponseValue struct {
 
 func (x *NamedResponseValue) Reset() {
 	*x = NamedResponseValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedResponseValue) String() string {
@@ -2250,7 +2199,7 @@ func (*NamedResponseValue) ProtoMessage() {}
 
 func (x *NamedResponseValue) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2293,11 +2242,9 @@ type NamedSchema struct {
 
 func (x *NamedSchema) Reset() {
 	*x = NamedSchema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedSchema) String() string {
@@ -2308,7 +2255,7 @@ func (*NamedSchema) ProtoMessage() {}
 
 func (x *NamedSchema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2351,11 +2298,9 @@ type NamedSecurityDefinitionsItem struct {
 
 func (x *NamedSecurityDefinitionsItem) Reset() {
 	*x = NamedSecurityDefinitionsItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedSecurityDefinitionsItem) String() string {
@@ -2366,7 +2311,7 @@ func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
 
 func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2409,11 +2354,9 @@ type NamedString struct {
 
 func (x *NamedString) Reset() {
 	*x = NamedString{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedString) String() string {
@@ -2424,7 +2367,7 @@ func (*NamedString) ProtoMessage() {}
 
 func (x *NamedString) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2467,11 +2410,9 @@ type NamedStringArray struct {
 
 func (x *NamedStringArray) Reset() {
 	*x = NamedStringArray{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedStringArray) String() string {
@@ -2482,7 +2423,7 @@ func (*NamedStringArray) ProtoMessage() {}
 
 func (x *NamedStringArray) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2517,6 +2458,7 @@ type NonBodyParameter struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*NonBodyParameter_HeaderParameterSubSchema
 	//	*NonBodyParameter_FormDataParameterSubSchema
 	//	*NonBodyParameter_QueryParameterSubSchema
@@ -2526,11 +2468,9 @@ type NonBodyParameter struct {
 
 func (x *NonBodyParameter) Reset() {
 	*x = NonBodyParameter{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NonBodyParameter) String() string {
@@ -2541,7 +2481,7 @@ func (*NonBodyParameter) ProtoMessage() {}
 
 func (x *NonBodyParameter) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2635,11 +2575,9 @@ type Oauth2AccessCodeSecurity struct {
 
 func (x *Oauth2AccessCodeSecurity) Reset() {
 	*x = Oauth2AccessCodeSecurity{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Oauth2AccessCodeSecurity) String() string {
@@ -2650,7 +2588,7 @@ func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
 
 func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2729,11 +2667,9 @@ type Oauth2ApplicationSecurity struct {
 
 func (x *Oauth2ApplicationSecurity) Reset() {
 	*x = Oauth2ApplicationSecurity{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Oauth2ApplicationSecurity) String() string {
@@ -2744,7 +2680,7 @@ func (*Oauth2ApplicationSecurity) ProtoMessage() {}
 
 func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2816,11 +2752,9 @@ type Oauth2ImplicitSecurity struct {
 
 func (x *Oauth2ImplicitSecurity) Reset() {
 	*x = Oauth2ImplicitSecurity{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Oauth2ImplicitSecurity) String() string {
@@ -2831,7 +2765,7 @@ func (*Oauth2ImplicitSecurity) ProtoMessage() {}
 
 func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2903,11 +2837,9 @@ type Oauth2PasswordSecurity struct {
 
 func (x *Oauth2PasswordSecurity) Reset() {
 	*x = Oauth2PasswordSecurity{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Oauth2PasswordSecurity) String() string {
@@ -2918,7 +2850,7 @@ func (*Oauth2PasswordSecurity) ProtoMessage() {}
 
 func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2985,11 +2917,9 @@ type Oauth2Scopes struct {
 
 func (x *Oauth2Scopes) Reset() {
 	*x = Oauth2Scopes{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Oauth2Scopes) String() string {
@@ -3000,7 +2930,7 @@ func (*Oauth2Scopes) ProtoMessage() {}
 
 func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3051,11 +2981,9 @@ type Operation struct {
 
 func (x *Operation) Reset() {
 	*x = Operation{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Operation) String() string {
@@ -3066,7 +2994,7 @@ func (*Operation) ProtoMessage() {}
 
 func (x *Operation) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3178,6 +3106,7 @@ type Parameter struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*Parameter_BodyParameter
 	//	*Parameter_NonBodyParameter
 	Oneof isParameter_Oneof `protobuf_oneof:"oneof"`
@@ -3185,11 +3114,9 @@ type Parameter struct {
 
 func (x *Parameter) Reset() {
 	*x = Parameter{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Parameter) String() string {
@@ -3200,7 +3127,7 @@ func (*Parameter) ProtoMessage() {}
 
 func (x *Parameter) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3263,11 +3190,9 @@ type ParameterDefinitions struct {
 
 func (x *ParameterDefinitions) Reset() {
 	*x = ParameterDefinitions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ParameterDefinitions) String() string {
@@ -3278,7 +3203,7 @@ func (*ParameterDefinitions) ProtoMessage() {}
 
 func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3306,6 +3231,7 @@ type ParametersItem struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*ParametersItem_Parameter
 	//	*ParametersItem_JsonReference
 	Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"`
@@ -3313,11 +3239,9 @@ type ParametersItem struct {
 
 func (x *ParametersItem) Reset() {
 	*x = ParametersItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ParametersItem) String() string {
@@ -3328,7 +3252,7 @@ func (*ParametersItem) ProtoMessage() {}
 
 func (x *ParametersItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3400,11 +3324,9 @@ type PathItem struct {
 
 func (x *PathItem) Reset() {
 	*x = PathItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *PathItem) String() string {
@@ -3415,7 +3337,7 @@ func (*PathItem) ProtoMessage() {}
 
 func (x *PathItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3535,11 +3457,9 @@ type PathParameterSubSchema struct {
 
 func (x *PathParameterSubSchema) Reset() {
 	*x = PathParameterSubSchema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *PathParameterSubSchema) String() string {
@@ -3550,7 +3470,7 @@ func (*PathParameterSubSchema) ProtoMessage() {}
 
 func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3731,11 +3651,9 @@ type Paths struct {
 
 func (x *Paths) Reset() {
 	*x = Paths{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Paths) String() string {
@@ -3746,7 +3664,7 @@ func (*Paths) ProtoMessage() {}
 
 func (x *Paths) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3802,11 +3720,9 @@ type PrimitivesItems struct {
 
 func (x *PrimitivesItems) Reset() {
 	*x = PrimitivesItems{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *PrimitivesItems) String() string {
@@ -3817,7 +3733,7 @@ func (*PrimitivesItems) ProtoMessage() {}
 
 func (x *PrimitivesItems) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3968,11 +3884,9 @@ type Properties struct {
 
 func (x *Properties) Reset() {
 	*x = Properties{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Properties) String() string {
@@ -3983,7 +3897,7 @@ func (*Properties) ProtoMessage() {}
 
 func (x *Properties) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4042,11 +3956,9 @@ type QueryParameterSubSchema struct {
 
 func (x *QueryParameterSubSchema) Reset() {
 	*x = QueryParameterSubSchema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *QueryParameterSubSchema) String() string {
@@ -4057,7 +3969,7 @@ func (*QueryParameterSubSchema) ProtoMessage() {}
 
 func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4247,11 +4159,9 @@ type Response struct {
 
 func (x *Response) Reset() {
 	*x = Response{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Response) String() string {
@@ -4262,7 +4172,7 @@ func (*Response) ProtoMessage() {}
 
 func (x *Response) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4323,11 +4233,9 @@ type ResponseDefinitions struct {
 
 func (x *ResponseDefinitions) Reset() {
 	*x = ResponseDefinitions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ResponseDefinitions) String() string {
@@ -4338,7 +4246,7 @@ func (*ResponseDefinitions) ProtoMessage() {}
 
 func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4366,6 +4274,7 @@ type ResponseValue struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*ResponseValue_Response
 	//	*ResponseValue_JsonReference
 	Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"`
@@ -4373,11 +4282,9 @@ type ResponseValue struct {
 
 func (x *ResponseValue) Reset() {
 	*x = ResponseValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ResponseValue) String() string {
@@ -4388,7 +4295,7 @@ func (*ResponseValue) ProtoMessage() {}
 
 func (x *ResponseValue) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4452,11 +4359,9 @@ type Responses struct {
 
 func (x *Responses) Reset() {
 	*x = Responses{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Responses) String() string {
@@ -4467,7 +4372,7 @@ func (*Responses) ProtoMessage() {}
 
 func (x *Responses) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4537,11 +4442,9 @@ type Schema struct {
 
 func (x *Schema) Reset() {
 	*x = Schema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Schema) String() string {
@@ -4552,7 +4455,7 @@ func (*Schema) ProtoMessage() {}
 
 func (x *Schema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4790,6 +4693,7 @@ type SchemaItem struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*SchemaItem_Schema
 	//	*SchemaItem_FileSchema
 	Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"`
@@ -4797,11 +4701,9 @@ type SchemaItem struct {
 
 func (x *SchemaItem) Reset() {
 	*x = SchemaItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SchemaItem) String() string {
@@ -4812,7 +4714,7 @@ func (*SchemaItem) ProtoMessage() {}
 
 func (x *SchemaItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4874,11 +4776,9 @@ type SecurityDefinitions struct {
 
 func (x *SecurityDefinitions) Reset() {
 	*x = SecurityDefinitions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SecurityDefinitions) String() string {
@@ -4889,7 +4789,7 @@ func (*SecurityDefinitions) ProtoMessage() {}
 
 func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4917,6 +4817,7 @@ type SecurityDefinitionsItem struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*SecurityDefinitionsItem_BasicAuthenticationSecurity
 	//	*SecurityDefinitionsItem_ApiKeySecurity
 	//	*SecurityDefinitionsItem_Oauth2ImplicitSecurity
@@ -4928,11 +4829,9 @@ type SecurityDefinitionsItem struct {
 
 func (x *SecurityDefinitionsItem) Reset() {
 	*x = SecurityDefinitionsItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SecurityDefinitionsItem) String() string {
@@ -4943,7 +4842,7 @@ func (*SecurityDefinitionsItem) ProtoMessage() {}
 
 func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5057,11 +4956,9 @@ type SecurityRequirement struct {
 
 func (x *SecurityRequirement) Reset() {
 	*x = SecurityRequirement{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SecurityRequirement) String() string {
@@ -5072,7 +4969,7 @@ func (*SecurityRequirement) ProtoMessage() {}
 
 func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5104,11 +5001,9 @@ type StringArray struct {
 
 func (x *StringArray) Reset() {
 	*x = StringArray{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *StringArray) String() string {
@@ -5119,7 +5014,7 @@ func (*StringArray) ProtoMessage() {}
 
 func (x *StringArray) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5154,11 +5049,9 @@ type Tag struct {
 
 func (x *Tag) Reset() {
 	*x = Tag{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Tag) String() string {
@@ -5169,7 +5062,7 @@ func (*Tag) ProtoMessage() {}
 
 func (x *Tag) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5222,11 +5115,9 @@ type TypeItem struct {
 
 func (x *TypeItem) Reset() {
 	*x = TypeItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *TypeItem) String() string {
@@ -5237,7 +5128,7 @@ func (*TypeItem) ProtoMessage() {}
 
 func (x *TypeItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5270,11 +5161,9 @@ type VendorExtension struct {
 
 func (x *VendorExtension) Reset() {
 	*x = VendorExtension{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *VendorExtension) String() string {
@@ -5285,7 +5174,7 @@ func (*VendorExtension) ProtoMessage() {}
 
 func (x *VendorExtension) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5322,11 +5211,9 @@ type Xml struct {
 
 func (x *Xml) Reset() {
 	*x = Xml{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Xml) String() string {
@@ -5337,7 +5224,7 @@ func (*Xml) ProtoMessage() {}
 
 func (x *Xml) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6356,7 +6243,7 @@ func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte {
 }
 
 var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60)
-var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{
+var file_openapiv2_OpenAPIv2_proto_goTypes = []any{
 	(*AdditionalPropertiesItem)(nil),     // 0: openapi.v2.AdditionalPropertiesItem
 	(*Any)(nil),                          // 1: openapi.v2.Any
 	(*ApiKeySecurity)(nil),               // 2: openapi.v2.ApiKeySecurity
@@ -6565,755 +6452,33 @@ func file_openapiv2_OpenAPIv2_proto_init() {
 	if File_openapiv2_OpenAPIv2_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*AdditionalPropertiesItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Any); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ApiKeySecurity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*BasicAuthenticationSecurity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*BodyParameter); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Contact); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Default); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Definitions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Document); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Examples); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExternalDocs); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FileSchema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FormDataParameterSubSchema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Header); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*HeaderParameterSubSchema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Headers); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Info); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ItemsItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*JsonReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*License); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedAny); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedHeader); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedParameter); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedPathItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedResponseValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedSchema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedSecurityDefinitionsItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedString); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedStringArray); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NonBodyParameter); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Oauth2AccessCodeSecurity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Oauth2ApplicationSecurity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Oauth2ImplicitSecurity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Oauth2PasswordSecurity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Oauth2Scopes); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Operation); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Parameter); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ParameterDefinitions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ParametersItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*PathItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*PathParameterSubSchema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Paths); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*PrimitivesItems); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Properties); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*QueryParameterSubSchema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Response); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ResponseDefinitions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ResponseValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Responses); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Schema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SchemaItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SecurityDefinitions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SecurityDefinitionsItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SecurityRequirement); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*StringArray); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Tag); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*TypeItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*VendorExtension); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Xml); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []any{
 		(*AdditionalPropertiesItem_Schema)(nil),
 		(*AdditionalPropertiesItem_Boolean)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []any{
 		(*NonBodyParameter_HeaderParameterSubSchema)(nil),
 		(*NonBodyParameter_FormDataParameterSubSchema)(nil),
 		(*NonBodyParameter_QueryParameterSubSchema)(nil),
 		(*NonBodyParameter_PathParameterSubSchema)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []any{
 		(*Parameter_BodyParameter)(nil),
 		(*Parameter_NonBodyParameter)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []any{
 		(*ParametersItem_Parameter)(nil),
 		(*ParametersItem_JsonReference)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []any{
 		(*ResponseValue_Response)(nil),
 		(*ResponseValue_JsonReference)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []any{
 		(*SchemaItem_Schema)(nil),
 		(*SchemaItem_FileSchema)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []any{
 		(*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil),
 		(*SecurityDefinitionsItem_ApiKeySecurity)(nil),
 		(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil),
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
index 945b8d11ff59a948e83e972c2a904c91e37941a7..b9df95a379359d1391f8d1944eee778c0bc84a97 100644
--- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
@@ -16,8 +16,8 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.27.1
-// 	protoc        v3.19.3
+// 	protoc-gen-go v1.35.1
+// 	protoc        v4.23.4
 // source: openapiv3/OpenAPIv3.proto
 
 package openapi_v3
@@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*AdditionalPropertiesItem_SchemaOrReference
 	//	*AdditionalPropertiesItem_Boolean
 	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
@@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct {
 
 func (x *AdditionalPropertiesItem) Reset() {
 	*x = AdditionalPropertiesItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *AdditionalPropertiesItem) String() string {
@@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {}
 
 func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -128,11 +127,9 @@ type Any struct {
 
 func (x *Any) Reset() {
 	*x = Any{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Any) String() string {
@@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {}
 
 func (x *Any) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -178,6 +175,7 @@ type AnyOrExpression struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*AnyOrExpression_Any
 	//	*AnyOrExpression_Expression
 	Oneof isAnyOrExpression_Oneof `protobuf_oneof:"oneof"`
@@ -185,11 +183,9 @@ type AnyOrExpression struct {
 
 func (x *AnyOrExpression) Reset() {
 	*x = AnyOrExpression{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *AnyOrExpression) String() string {
@@ -200,7 +196,7 @@ func (*AnyOrExpression) ProtoMessage() {}
 
 func (x *AnyOrExpression) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -264,11 +260,9 @@ type Callback struct {
 
 func (x *Callback) Reset() {
 	*x = Callback{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Callback) String() string {
@@ -279,7 +273,7 @@ func (*Callback) ProtoMessage() {}
 
 func (x *Callback) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -314,6 +308,7 @@ type CallbackOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*CallbackOrReference_Callback
 	//	*CallbackOrReference_Reference
 	Oneof isCallbackOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -321,11 +316,9 @@ type CallbackOrReference struct {
 
 func (x *CallbackOrReference) Reset() {
 	*x = CallbackOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *CallbackOrReference) String() string {
@@ -336,7 +329,7 @@ func (*CallbackOrReference) ProtoMessage() {}
 
 func (x *CallbackOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -398,11 +391,9 @@ type CallbacksOrReferences struct {
 
 func (x *CallbacksOrReferences) Reset() {
 	*x = CallbacksOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *CallbacksOrReferences) String() string {
@@ -413,7 +404,7 @@ func (*CallbacksOrReferences) ProtoMessage() {}
 
 func (x *CallbacksOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -455,11 +446,9 @@ type Components struct {
 
 func (x *Components) Reset() {
 	*x = Components{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Components) String() string {
@@ -470,7 +459,7 @@ func (*Components) ProtoMessage() {}
 
 func (x *Components) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -569,11 +558,9 @@ type Contact struct {
 
 func (x *Contact) Reset() {
 	*x = Contact{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Contact) String() string {
@@ -584,7 +571,7 @@ func (*Contact) ProtoMessage() {}
 
 func (x *Contact) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -633,6 +620,7 @@ type DefaultType struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*DefaultType_Number
 	//	*DefaultType_Boolean
 	//	*DefaultType_String_
@@ -641,11 +629,9 @@ type DefaultType struct {
 
 func (x *DefaultType) Reset() {
 	*x = DefaultType{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DefaultType) String() string {
@@ -656,7 +642,7 @@ func (*DefaultType) ProtoMessage() {}
 
 func (x *DefaultType) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -734,11 +720,9 @@ type Discriminator struct {
 
 func (x *Discriminator) Reset() {
 	*x = Discriminator{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Discriminator) String() string {
@@ -749,7 +733,7 @@ func (*Discriminator) ProtoMessage() {}
 
 func (x *Discriminator) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -803,11 +787,9 @@ type Document struct {
 
 func (x *Document) Reset() {
 	*x = Document{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Document) String() string {
@@ -818,7 +800,7 @@ func (*Document) ProtoMessage() {}
 
 func (x *Document) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -912,11 +894,9 @@ type Encoding struct {
 
 func (x *Encoding) Reset() {
 	*x = Encoding{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Encoding) String() string {
@@ -927,7 +907,7 @@ func (*Encoding) ProtoMessage() {}
 
 func (x *Encoding) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -994,11 +974,9 @@ type Encodings struct {
 
 func (x *Encodings) Reset() {
 	*x = Encodings{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Encodings) String() string {
@@ -1009,7 +987,7 @@ func (*Encodings) ProtoMessage() {}
 
 func (x *Encodings) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1045,11 +1023,9 @@ type Example struct {
 
 func (x *Example) Reset() {
 	*x = Example{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Example) String() string {
@@ -1060,7 +1036,7 @@ func (*Example) ProtoMessage() {}
 
 func (x *Example) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1116,6 +1092,7 @@ type ExampleOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*ExampleOrReference_Example
 	//	*ExampleOrReference_Reference
 	Oneof isExampleOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -1123,11 +1100,9 @@ type ExampleOrReference struct {
 
 func (x *ExampleOrReference) Reset() {
 	*x = ExampleOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExampleOrReference) String() string {
@@ -1138,7 +1113,7 @@ func (*ExampleOrReference) ProtoMessage() {}
 
 func (x *ExampleOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1200,11 +1175,9 @@ type ExamplesOrReferences struct {
 
 func (x *ExamplesOrReferences) Reset() {
 	*x = ExamplesOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExamplesOrReferences) String() string {
@@ -1215,7 +1188,7 @@ func (*ExamplesOrReferences) ProtoMessage() {}
 
 func (x *ExamplesOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1247,11 +1220,9 @@ type Expression struct {
 
 func (x *Expression) Reset() {
 	*x = Expression{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Expression) String() string {
@@ -1262,7 +1233,7 @@ func (*Expression) ProtoMessage() {}
 
 func (x *Expression) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1297,11 +1268,9 @@ type ExternalDocs struct {
 
 func (x *ExternalDocs) Reset() {
 	*x = ExternalDocs{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExternalDocs) String() string {
@@ -1312,7 +1281,7 @@ func (*ExternalDocs) ProtoMessage() {}
 
 func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1370,11 +1339,9 @@ type Header struct {
 
 func (x *Header) Reset() {
 	*x = Header{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Header) String() string {
@@ -1385,7 +1352,7 @@ func (*Header) ProtoMessage() {}
 
 func (x *Header) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1490,6 +1457,7 @@ type HeaderOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*HeaderOrReference_Header
 	//	*HeaderOrReference_Reference
 	Oneof isHeaderOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -1497,11 +1465,9 @@ type HeaderOrReference struct {
 
 func (x *HeaderOrReference) Reset() {
 	*x = HeaderOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *HeaderOrReference) String() string {
@@ -1512,7 +1478,7 @@ func (*HeaderOrReference) ProtoMessage() {}
 
 func (x *HeaderOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1574,11 +1540,9 @@ type HeadersOrReferences struct {
 
 func (x *HeadersOrReferences) Reset() {
 	*x = HeadersOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *HeadersOrReferences) String() string {
@@ -1589,7 +1553,7 @@ func (*HeadersOrReferences) ProtoMessage() {}
 
 func (x *HeadersOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1629,11 +1593,9 @@ type Info struct {
 
 func (x *Info) Reset() {
 	*x = Info{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Info) String() string {
@@ -1644,7 +1606,7 @@ func (*Info) ProtoMessage() {}
 
 func (x *Info) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1725,11 +1687,9 @@ type ItemsItem struct {
 
 func (x *ItemsItem) Reset() {
 	*x = ItemsItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ItemsItem) String() string {
@@ -1740,7 +1700,7 @@ func (*ItemsItem) ProtoMessage() {}
 
 func (x *ItemsItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1775,11 +1735,9 @@ type License struct {
 
 func (x *License) Reset() {
 	*x = License{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *License) String() string {
@@ -1790,7 +1748,7 @@ func (*License) ProtoMessage() {}
 
 func (x *License) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1843,11 +1801,9 @@ type Link struct {
 
 func (x *Link) Reset() {
 	*x = Link{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Link) String() string {
@@ -1858,7 +1814,7 @@ func (*Link) ProtoMessage() {}
 
 func (x *Link) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1928,6 +1884,7 @@ type LinkOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*LinkOrReference_Link
 	//	*LinkOrReference_Reference
 	Oneof isLinkOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -1935,11 +1892,9 @@ type LinkOrReference struct {
 
 func (x *LinkOrReference) Reset() {
 	*x = LinkOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *LinkOrReference) String() string {
@@ -1950,7 +1905,7 @@ func (*LinkOrReference) ProtoMessage() {}
 
 func (x *LinkOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2012,11 +1967,9 @@ type LinksOrReferences struct {
 
 func (x *LinksOrReferences) Reset() {
 	*x = LinksOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *LinksOrReferences) String() string {
@@ -2027,7 +1980,7 @@ func (*LinksOrReferences) ProtoMessage() {}
 
 func (x *LinksOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2064,11 +2017,9 @@ type MediaType struct {
 
 func (x *MediaType) Reset() {
 	*x = MediaType{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MediaType) String() string {
@@ -2079,7 +2030,7 @@ func (*MediaType) ProtoMessage() {}
 
 func (x *MediaType) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2139,11 +2090,9 @@ type MediaTypes struct {
 
 func (x *MediaTypes) Reset() {
 	*x = MediaTypes{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MediaTypes) String() string {
@@ -2154,7 +2103,7 @@ func (*MediaTypes) ProtoMessage() {}
 
 func (x *MediaTypes) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2190,11 +2139,9 @@ type NamedAny struct {
 
 func (x *NamedAny) Reset() {
 	*x = NamedAny{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedAny) String() string {
@@ -2205,7 +2152,7 @@ func (*NamedAny) ProtoMessage() {}
 
 func (x *NamedAny) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2248,11 +2195,9 @@ type NamedCallbackOrReference struct {
 
 func (x *NamedCallbackOrReference) Reset() {
 	*x = NamedCallbackOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedCallbackOrReference) String() string {
@@ -2263,7 +2208,7 @@ func (*NamedCallbackOrReference) ProtoMessage() {}
 
 func (x *NamedCallbackOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2306,11 +2251,9 @@ type NamedEncoding struct {
 
 func (x *NamedEncoding) Reset() {
 	*x = NamedEncoding{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedEncoding) String() string {
@@ -2321,7 +2264,7 @@ func (*NamedEncoding) ProtoMessage() {}
 
 func (x *NamedEncoding) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2364,11 +2307,9 @@ type NamedExampleOrReference struct {
 
 func (x *NamedExampleOrReference) Reset() {
 	*x = NamedExampleOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedExampleOrReference) String() string {
@@ -2379,7 +2320,7 @@ func (*NamedExampleOrReference) ProtoMessage() {}
 
 func (x *NamedExampleOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2422,11 +2363,9 @@ type NamedHeaderOrReference struct {
 
 func (x *NamedHeaderOrReference) Reset() {
 	*x = NamedHeaderOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedHeaderOrReference) String() string {
@@ -2437,7 +2376,7 @@ func (*NamedHeaderOrReference) ProtoMessage() {}
 
 func (x *NamedHeaderOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2480,11 +2419,9 @@ type NamedLinkOrReference struct {
 
 func (x *NamedLinkOrReference) Reset() {
 	*x = NamedLinkOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedLinkOrReference) String() string {
@@ -2495,7 +2432,7 @@ func (*NamedLinkOrReference) ProtoMessage() {}
 
 func (x *NamedLinkOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2538,11 +2475,9 @@ type NamedMediaType struct {
 
 func (x *NamedMediaType) Reset() {
 	*x = NamedMediaType{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedMediaType) String() string {
@@ -2553,7 +2488,7 @@ func (*NamedMediaType) ProtoMessage() {}
 
 func (x *NamedMediaType) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2596,11 +2531,9 @@ type NamedParameterOrReference struct {
 
 func (x *NamedParameterOrReference) Reset() {
 	*x = NamedParameterOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedParameterOrReference) String() string {
@@ -2611,7 +2544,7 @@ func (*NamedParameterOrReference) ProtoMessage() {}
 
 func (x *NamedParameterOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2654,11 +2587,9 @@ type NamedPathItem struct {
 
 func (x *NamedPathItem) Reset() {
 	*x = NamedPathItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedPathItem) String() string {
@@ -2669,7 +2600,7 @@ func (*NamedPathItem) ProtoMessage() {}
 
 func (x *NamedPathItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2712,11 +2643,9 @@ type NamedRequestBodyOrReference struct {
 
 func (x *NamedRequestBodyOrReference) Reset() {
 	*x = NamedRequestBodyOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedRequestBodyOrReference) String() string {
@@ -2727,7 +2656,7 @@ func (*NamedRequestBodyOrReference) ProtoMessage() {}
 
 func (x *NamedRequestBodyOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2770,11 +2699,9 @@ type NamedResponseOrReference struct {
 
 func (x *NamedResponseOrReference) Reset() {
 	*x = NamedResponseOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedResponseOrReference) String() string {
@@ -2785,7 +2712,7 @@ func (*NamedResponseOrReference) ProtoMessage() {}
 
 func (x *NamedResponseOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2828,11 +2755,9 @@ type NamedSchemaOrReference struct {
 
 func (x *NamedSchemaOrReference) Reset() {
 	*x = NamedSchemaOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedSchemaOrReference) String() string {
@@ -2843,7 +2768,7 @@ func (*NamedSchemaOrReference) ProtoMessage() {}
 
 func (x *NamedSchemaOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2886,11 +2811,9 @@ type NamedSecuritySchemeOrReference struct {
 
 func (x *NamedSecuritySchemeOrReference) Reset() {
 	*x = NamedSecuritySchemeOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedSecuritySchemeOrReference) String() string {
@@ -2901,7 +2824,7 @@ func (*NamedSecuritySchemeOrReference) ProtoMessage() {}
 
 func (x *NamedSecuritySchemeOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2944,11 +2867,9 @@ type NamedServerVariable struct {
 
 func (x *NamedServerVariable) Reset() {
 	*x = NamedServerVariable{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedServerVariable) String() string {
@@ -2959,7 +2880,7 @@ func (*NamedServerVariable) ProtoMessage() {}
 
 func (x *NamedServerVariable) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3002,11 +2923,9 @@ type NamedString struct {
 
 func (x *NamedString) Reset() {
 	*x = NamedString{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedString) String() string {
@@ -3017,7 +2936,7 @@ func (*NamedString) ProtoMessage() {}
 
 func (x *NamedString) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3060,11 +2979,9 @@ type NamedStringArray struct {
 
 func (x *NamedStringArray) Reset() {
 	*x = NamedStringArray{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *NamedStringArray) String() string {
@@ -3075,7 +2992,7 @@ func (*NamedStringArray) ProtoMessage() {}
 
 func (x *NamedStringArray) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3119,11 +3036,9 @@ type OauthFlow struct {
 
 func (x *OauthFlow) Reset() {
 	*x = OauthFlow{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *OauthFlow) String() string {
@@ -3134,7 +3049,7 @@ func (*OauthFlow) ProtoMessage() {}
 
 func (x *OauthFlow) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3199,11 +3114,9 @@ type OauthFlows struct {
 
 func (x *OauthFlows) Reset() {
 	*x = OauthFlows{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *OauthFlows) String() string {
@@ -3214,7 +3127,7 @@ func (*OauthFlows) ProtoMessage() {}
 
 func (x *OauthFlows) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3274,11 +3187,9 @@ type Object struct {
 
 func (x *Object) Reset() {
 	*x = Object{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Object) String() string {
@@ -3289,7 +3200,7 @@ func (*Object) ProtoMessage() {}
 
 func (x *Object) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3334,11 +3245,9 @@ type Operation struct {
 
 func (x *Operation) Reset() {
 	*x = Operation{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Operation) String() string {
@@ -3349,7 +3258,7 @@ func (*Operation) ProtoMessage() {}
 
 func (x *Operation) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3479,11 +3388,9 @@ type Parameter struct {
 
 func (x *Parameter) Reset() {
 	*x = Parameter{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Parameter) String() string {
@@ -3494,7 +3401,7 @@ func (*Parameter) ProtoMessage() {}
 
 func (x *Parameter) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3613,6 +3520,7 @@ type ParameterOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*ParameterOrReference_Parameter
 	//	*ParameterOrReference_Reference
 	Oneof isParameterOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -3620,11 +3528,9 @@ type ParameterOrReference struct {
 
 func (x *ParameterOrReference) Reset() {
 	*x = ParameterOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ParameterOrReference) String() string {
@@ -3635,7 +3541,7 @@ func (*ParameterOrReference) ProtoMessage() {}
 
 func (x *ParameterOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3697,11 +3603,9 @@ type ParametersOrReferences struct {
 
 func (x *ParametersOrReferences) Reset() {
 	*x = ParametersOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ParametersOrReferences) String() string {
@@ -3712,7 +3616,7 @@ func (*ParametersOrReferences) ProtoMessage() {}
 
 func (x *ParametersOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3758,11 +3662,9 @@ type PathItem struct {
 
 func (x *PathItem) Reset() {
 	*x = PathItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *PathItem) String() string {
@@ -3773,7 +3675,7 @@ func (*PathItem) ProtoMessage() {}
 
 func (x *PathItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3898,11 +3800,9 @@ type Paths struct {
 
 func (x *Paths) Reset() {
 	*x = Paths{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Paths) String() string {
@@ -3913,7 +3813,7 @@ func (*Paths) ProtoMessage() {}
 
 func (x *Paths) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3952,11 +3852,9 @@ type Properties struct {
 
 func (x *Properties) Reset() {
 	*x = Properties{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Properties) String() string {
@@ -3967,7 +3865,7 @@ func (*Properties) ProtoMessage() {}
 
 func (x *Properties) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4002,11 +3900,9 @@ type Reference struct {
 
 func (x *Reference) Reset() {
 	*x = Reference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Reference) String() string {
@@ -4017,7 +3913,7 @@ func (*Reference) ProtoMessage() {}
 
 func (x *Reference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4063,11 +3959,9 @@ type RequestBodiesOrReferences struct {
 
 func (x *RequestBodiesOrReferences) Reset() {
 	*x = RequestBodiesOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *RequestBodiesOrReferences) String() string {
@@ -4078,7 +3972,7 @@ func (*RequestBodiesOrReferences) ProtoMessage() {}
 
 func (x *RequestBodiesOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4114,11 +4008,9 @@ type RequestBody struct {
 
 func (x *RequestBody) Reset() {
 	*x = RequestBody{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *RequestBody) String() string {
@@ -4129,7 +4021,7 @@ func (*RequestBody) ProtoMessage() {}
 
 func (x *RequestBody) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4178,6 +4070,7 @@ type RequestBodyOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*RequestBodyOrReference_RequestBody
 	//	*RequestBodyOrReference_Reference
 	Oneof isRequestBodyOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -4185,11 +4078,9 @@ type RequestBodyOrReference struct {
 
 func (x *RequestBodyOrReference) Reset() {
 	*x = RequestBodyOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *RequestBodyOrReference) String() string {
@@ -4200,7 +4091,7 @@ func (*RequestBodyOrReference) ProtoMessage() {}
 
 func (x *RequestBodyOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4267,11 +4158,9 @@ type Response struct {
 
 func (x *Response) Reset() {
 	*x = Response{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Response) String() string {
@@ -4282,7 +4171,7 @@ func (*Response) ProtoMessage() {}
 
 func (x *Response) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4338,6 +4227,7 @@ type ResponseOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*ResponseOrReference_Response
 	//	*ResponseOrReference_Reference
 	Oneof isResponseOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -4345,11 +4235,9 @@ type ResponseOrReference struct {
 
 func (x *ResponseOrReference) Reset() {
 	*x = ResponseOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ResponseOrReference) String() string {
@@ -4360,7 +4248,7 @@ func (*ResponseOrReference) ProtoMessage() {}
 
 func (x *ResponseOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4425,11 +4313,9 @@ type Responses struct {
 
 func (x *Responses) Reset() {
 	*x = Responses{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Responses) String() string {
@@ -4440,7 +4326,7 @@ func (*Responses) ProtoMessage() {}
 
 func (x *Responses) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4486,11 +4372,9 @@ type ResponsesOrReferences struct {
 
 func (x *ResponsesOrReferences) Reset() {
 	*x = ResponsesOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ResponsesOrReferences) String() string {
@@ -4501,7 +4385,7 @@ func (*ResponsesOrReferences) ProtoMessage() {}
 
 func (x *ResponsesOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4569,11 +4453,9 @@ type Schema struct {
 
 func (x *Schema) Reset() {
 	*x = Schema{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Schema) String() string {
@@ -4584,7 +4466,7 @@ func (*Schema) ProtoMessage() {}
 
 func (x *Schema) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4857,6 +4739,7 @@ type SchemaOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*SchemaOrReference_Schema
 	//	*SchemaOrReference_Reference
 	Oneof isSchemaOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -4864,11 +4747,9 @@ type SchemaOrReference struct {
 
 func (x *SchemaOrReference) Reset() {
 	*x = SchemaOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SchemaOrReference) String() string {
@@ -4879,7 +4760,7 @@ func (*SchemaOrReference) ProtoMessage() {}
 
 func (x *SchemaOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4941,11 +4822,9 @@ type SchemasOrReferences struct {
 
 func (x *SchemasOrReferences) Reset() {
 	*x = SchemasOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SchemasOrReferences) String() string {
@@ -4956,7 +4835,7 @@ func (*SchemasOrReferences) ProtoMessage() {}
 
 func (x *SchemasOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4989,11 +4868,9 @@ type SecurityRequirement struct {
 
 func (x *SecurityRequirement) Reset() {
 	*x = SecurityRequirement{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SecurityRequirement) String() string {
@@ -5004,7 +4881,7 @@ func (*SecurityRequirement) ProtoMessage() {}
 
 func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5045,11 +4922,9 @@ type SecurityScheme struct {
 
 func (x *SecurityScheme) Reset() {
 	*x = SecurityScheme{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SecurityScheme) String() string {
@@ -5060,7 +4935,7 @@ func (*SecurityScheme) ProtoMessage() {}
 
 func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5144,6 +5019,7 @@ type SecuritySchemeOrReference struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*SecuritySchemeOrReference_SecurityScheme
 	//	*SecuritySchemeOrReference_Reference
 	Oneof isSecuritySchemeOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -5151,11 +5027,9 @@ type SecuritySchemeOrReference struct {
 
 func (x *SecuritySchemeOrReference) Reset() {
 	*x = SecuritySchemeOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SecuritySchemeOrReference) String() string {
@@ -5166,7 +5040,7 @@ func (*SecuritySchemeOrReference) ProtoMessage() {}
 
 func (x *SecuritySchemeOrReference) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5228,11 +5102,9 @@ type SecuritySchemesOrReferences struct {
 
 func (x *SecuritySchemesOrReferences) Reset() {
 	*x = SecuritySchemesOrReferences{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SecuritySchemesOrReferences) String() string {
@@ -5243,7 +5115,7 @@ func (*SecuritySchemesOrReferences) ProtoMessage() {}
 
 func (x *SecuritySchemesOrReferences) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5279,11 +5151,9 @@ type Server struct {
 
 func (x *Server) Reset() {
 	*x = Server{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Server) String() string {
@@ -5294,7 +5164,7 @@ func (*Server) ProtoMessage() {}
 
 func (x *Server) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5351,11 +5221,9 @@ type ServerVariable struct {
 
 func (x *ServerVariable) Reset() {
 	*x = ServerVariable{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ServerVariable) String() string {
@@ -5366,7 +5234,7 @@ func (*ServerVariable) ProtoMessage() {}
 
 func (x *ServerVariable) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5419,11 +5287,9 @@ type ServerVariables struct {
 
 func (x *ServerVariables) Reset() {
 	*x = ServerVariables{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ServerVariables) String() string {
@@ -5434,7 +5300,7 @@ func (*ServerVariables) ProtoMessage() {}
 
 func (x *ServerVariables) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5463,6 +5329,7 @@ type SpecificationExtension struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*SpecificationExtension_Number
 	//	*SpecificationExtension_Boolean
 	//	*SpecificationExtension_String_
@@ -5471,11 +5338,9 @@ type SpecificationExtension struct {
 
 func (x *SpecificationExtension) Reset() {
 	*x = SpecificationExtension{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SpecificationExtension) String() string {
@@ -5486,7 +5351,7 @@ func (*SpecificationExtension) ProtoMessage() {}
 
 func (x *SpecificationExtension) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5561,11 +5426,9 @@ type StringArray struct {
 
 func (x *StringArray) Reset() {
 	*x = StringArray{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *StringArray) String() string {
@@ -5576,7 +5439,7 @@ func (*StringArray) ProtoMessage() {}
 
 func (x *StringArray) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5608,11 +5471,9 @@ type Strings struct {
 
 func (x *Strings) Reset() {
 	*x = Strings{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Strings) String() string {
@@ -5623,7 +5484,7 @@ func (*Strings) ProtoMessage() {}
 
 func (x *Strings) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5659,11 +5520,9 @@ type Tag struct {
 
 func (x *Tag) Reset() {
 	*x = Tag{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Tag) String() string {
@@ -5674,7 +5533,7 @@ func (*Tag) ProtoMessage() {}
 
 func (x *Tag) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5733,11 +5592,9 @@ type Xml struct {
 
 func (x *Xml) Reset() {
 	*x = Xml{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Xml) String() string {
@@ -5748,7 +5605,7 @@ func (*Xml) ProtoMessage() {}
 
 func (x *Xml) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6781,7 +6638,7 @@ func file_openapiv3_OpenAPIv3_proto_rawDescGZIP() []byte {
 }
 
 var file_openapiv3_OpenAPIv3_proto_msgTypes = make([]protoimpl.MessageInfo, 78)
-var file_openapiv3_OpenAPIv3_proto_goTypes = []interface{}{
+var file_openapiv3_OpenAPIv3_proto_goTypes = []any{
 	(*AdditionalPropertiesItem)(nil),       // 0: openapi.v3.AdditionalPropertiesItem
 	(*Any)(nil),                            // 1: openapi.v3.Any
 	(*AnyOrExpression)(nil),                // 2: openapi.v3.AnyOrExpression
@@ -7040,994 +6897,56 @@ func file_openapiv3_OpenAPIv3_proto_init() {
 	if File_openapiv3_OpenAPIv3_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_openapiv3_OpenAPIv3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*AdditionalPropertiesItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Any); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*AnyOrExpression); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Callback); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*CallbackOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*CallbacksOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Components); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Contact); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DefaultType); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Discriminator); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Document); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Encoding); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Encodings); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Example); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExampleOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExamplesOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Expression); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExternalDocs); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Header); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*HeaderOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*HeadersOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Info); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ItemsItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*License); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Link); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*LinkOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*LinksOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*MediaType); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*MediaTypes); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedAny); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedCallbackOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedEncoding); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedExampleOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedHeaderOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedLinkOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedMediaType); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedParameterOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedPathItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedRequestBodyOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedResponseOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedSchemaOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedSecuritySchemeOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedServerVariable); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedString); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NamedStringArray); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*OauthFlow); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*OauthFlows); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Object); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Operation); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Parameter); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ParameterOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ParametersOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*PathItem); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Paths); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Properties); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Reference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*RequestBodiesOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*RequestBody); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*RequestBodyOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Response); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ResponseOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Responses); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ResponsesOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Schema); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SchemaOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SchemasOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SecurityRequirement); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SecurityScheme); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SecuritySchemeOrReference); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SecuritySchemesOrReferences); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Server); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ServerVariable); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ServerVariables); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SpecificationExtension); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*StringArray); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Strings); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Tag); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv3_OpenAPIv3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Xml); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []any{
 		(*AdditionalPropertiesItem_SchemaOrReference)(nil),
 		(*AdditionalPropertiesItem_Boolean)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []any{
 		(*AnyOrExpression_Any)(nil),
 		(*AnyOrExpression_Expression)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []any{
 		(*CallbackOrReference_Callback)(nil),
 		(*CallbackOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []any{
 		(*DefaultType_Number)(nil),
 		(*DefaultType_Boolean)(nil),
 		(*DefaultType_String_)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []any{
 		(*ExampleOrReference_Example)(nil),
 		(*ExampleOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []any{
 		(*HeaderOrReference_Header)(nil),
 		(*HeaderOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []any{
 		(*LinkOrReference_Link)(nil),
 		(*LinkOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []any{
 		(*ParameterOrReference_Parameter)(nil),
 		(*ParameterOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []any{
 		(*RequestBodyOrReference_RequestBody)(nil),
 		(*RequestBodyOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []any{
 		(*ResponseOrReference_Response)(nil),
 		(*ResponseOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []any{
 		(*SchemaOrReference_Schema)(nil),
 		(*SchemaOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []any{
 		(*SecuritySchemeOrReference_SecurityScheme)(nil),
 		(*SecuritySchemeOrReference_Reference)(nil),
 	}
-	file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []interface{}{
+	file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []any{
 		(*SpecificationExtension_Number)(nil),
 		(*SpecificationExtension_Boolean)(nil),
 		(*SpecificationExtension_String_)(nil),
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..f9f1bd26547a68537827acb9795790948e585bc8
--- /dev/null
+++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go
@@ -0,0 +1,182 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.35.1
+// 	protoc        v4.23.4
+// source: openapiv3/annotations.proto
+
+package openapi_v3
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FileOptions)(nil),
+		ExtensionType: (*Document)(nil),
+		Field:         1143,
+		Name:          "openapi.v3.document",
+		Tag:           "bytes,1143,opt,name=document",
+		Filename:      "openapiv3/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
+		ExtensionType: (*Operation)(nil),
+		Field:         1143,
+		Name:          "openapi.v3.operation",
+		Tag:           "bytes,1143,opt,name=operation",
+		Filename:      "openapiv3/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
+		ExtensionType: (*Schema)(nil),
+		Field:         1143,
+		Name:          "openapi.v3.schema",
+		Tag:           "bytes,1143,opt,name=schema",
+		Filename:      "openapiv3/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*Schema)(nil),
+		Field:         1143,
+		Name:          "openapi.v3.property",
+		Tag:           "bytes,1143,opt,name=property",
+		Filename:      "openapiv3/annotations.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+	// optional openapi.v3.Document document = 1143;
+	E_Document = &file_openapiv3_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+	// optional openapi.v3.Operation operation = 1143;
+	E_Operation = &file_openapiv3_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+	// optional openapi.v3.Schema schema = 1143;
+	E_Schema = &file_openapiv3_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// optional openapi.v3.Schema property = 1143;
+	E_Property = &file_openapiv3_annotations_proto_extTypes[3]
+)
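+
+// As an illustration (not part of the generated code), an annotation can be
+// read back from descriptor options in another package with
+// proto.GetExtension, e.g.:
+//
+//	op := proto.GetExtension(methodOptions, openapi_v3.E_Operation).(*openapi_v3.Operation)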
+
+var File_openapiv3_annotations_proto protoreflect.FileDescriptor
+
+var file_openapiv3_annotations_proto_rawDesc = []byte{
+	0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+	0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+	0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64,
+	0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a,
+	0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70,
+	0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f,
+	0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+	0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62,
+	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_openapiv3_annotations_proto_goTypes = []any{
+	(*descriptorpb.FileOptions)(nil),    // 0: google.protobuf.FileOptions
+	(*descriptorpb.MethodOptions)(nil),  // 1: google.protobuf.MethodOptions
+	(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+	(*descriptorpb.FieldOptions)(nil),   // 3: google.protobuf.FieldOptions
+	(*Document)(nil),                    // 4: openapi.v3.Document
+	(*Operation)(nil),                   // 5: openapi.v3.Operation
+	(*Schema)(nil),                      // 6: openapi.v3.Schema
+}
+var file_openapiv3_annotations_proto_depIdxs = []int32{
+	0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions
+	1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions
+	2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions
+	3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions
+	4, // 4: openapi.v3.document:type_name -> openapi.v3.Document
+	5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation
+	6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema
+	6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema
+	8, // [8:8] is the sub-list for method output_type
+	8, // [8:8] is the sub-list for method input_type
+	4, // [4:8] is the sub-list for extension type_name
+	0, // [0:4] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_openapiv3_annotations_proto_init() }
+func file_openapiv3_annotations_proto_init() {
+	if File_openapiv3_annotations_proto != nil {
+		return
+	}
+	file_openapiv3_OpenAPIv3_proto_init()
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_openapiv3_annotations_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 4,
+			NumServices:   0,
+		},
+		GoTypes:           file_openapiv3_annotations_proto_goTypes,
+		DependencyIndexes: file_openapiv3_annotations_proto_depIdxs,
+		ExtensionInfos:    file_openapiv3_annotations_proto_extTypes,
+	}.Build()
+	File_openapiv3_annotations_proto = out.File
+	file_openapiv3_annotations_proto_rawDesc = nil
+	file_openapiv3_annotations_proto_goTypes = nil
+	file_openapiv3_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto
new file mode 100644
index 0000000000000000000000000000000000000000..09ee0aac51b43714d7e6ac972d6bc3b5d0e702d7
--- /dev/null
+++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto
@@ -0,0 +1,56 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package openapi.v3;
+
+import "google/protobuf/descriptor.proto";
+import "openapiv3/OpenAPIv3.proto";
+
+// The Go package name.
+option go_package = "./openapiv3;openapi_v3";
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one level of name nesting and being
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold the proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "AnnotationsProto";
+// The Java package name must be the proto package name with a proper prefix.
+option java_package = "org.openapi_v3";
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+option objc_class_prefix = "OAS";
+
+extend google.protobuf.FileOptions {
+  Document document = 1143;
+}
+
+extend google.protobuf.MethodOptions {
+  Operation operation = 1143;
+}
+
+extend google.protobuf.MessageOptions {
+  Schema schema = 1143;
+}
+
+extend google.protobuf.FieldOptions {
+  Schema property = 1143;
+}
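+
+// Illustrative use (not part of this file; the method and message names below
+// are hypothetical): in another proto file, attach an operation description
+// to an RPC method:
+//
+//   import "openapiv3/annotations.proto";
+//
+//   rpc GetFlow(GetFlowRequest) returns (Flow) {
+//     option (openapi.v3.operation) = {summary: "Fetch a single flow"};
+//   }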
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
index 2517a28715facc76c62c39808092c03fcd108bc7..d33ed7fdd8f89da3821a20c30d416803ca2d4adf 100644
--- a/vendor/github.com/gorilla/websocket/README.md
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -7,12 +7,6 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
 [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
 
 
----
-
-⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)**
-
----
-
 ### Documentation
 
 * [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
index 2efd83555d375b7dcb6ab19071c89eff4aa8f8ce..04fdafee18ea87bd40e981393f3405157180949f 100644
--- a/vendor/github.com/gorilla/websocket/client.go
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -9,6 +9,7 @@ import (
 	"context"
 	"crypto/tls"
 	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"net"
@@ -318,14 +319,14 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
 	}
 
 	netConn, err := netDial("tcp", hostPort)
+	if err != nil {
+		return nil, nil, err
+	}
 	if trace != nil && trace.GotConn != nil {
 		trace.GotConn(httptrace.GotConnInfo{
 			Conn: netConn,
 		})
 	}
-	if err != nil {
-		return nil, nil, err
-	}
 
 	defer func() {
 		if netConn != nil {
@@ -370,6 +371,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
 
 	resp, err := http.ReadResponse(conn.br, req)
 	if err != nil {
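+		// A tls.Config that is shared with an HTTP/2-enabled net/http
+		// Transport advertises ALPN protocols the WebSocket handshake
+		// cannot use; if one is found, wrap the error with a hint.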
+		if d.TLSClientConfig != nil {
+			for _, proto := range d.TLSClientConfig.NextProtos {
+				if proto != "http/1.1" {
+					return nil, nil, fmt.Errorf(
+						"websocket: protocol %q was given but is not supported;"+
+							"sharing tls.Config with net/http Transport can cause this error: %w",
+						proto, err,
+					)
+				}
+			}
+		}
 		return nil, nil, err
 	}
 
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
index 331eebc85009aba4aa029ccf2e218141839d448f..5161ef81f6292b36e6819a719c42fef48219f12b 100644
--- a/vendor/github.com/gorilla/websocket/conn.go
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -1189,8 +1189,16 @@ func (c *Conn) SetPongHandler(h func(appData string) error) {
 	c.handlePong = h
 }
 
+// NetConn returns the underlying connection that is wrapped by c.
+// Note that writing to or reading from this connection directly will corrupt the
+// WebSocket connection.
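+//
+// It can be used, for example, to tune options on the wrapped TCP
+// connection (an illustrative use, not required by this package):
+//
+//	if tcp, ok := c.NetConn().(*net.TCPConn); ok {
+//		tcp.SetNoDelay(false)
+//	}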
+func (c *Conn) NetConn() net.Conn {
+	return c.conn
+}
+
 // UnderlyingConn returns the internal net.Conn. It can be used to make
 // further modifications to connection-specific flags.
+// Deprecated: Use the NetConn method.
 func (c *Conn) UnderlyingConn() net.Conn {
 	return c.conn
 }
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
index 24d53b38abededecbfca6e3d0dff02f67742faef..bb335974321331f4b5e430e286765f717769f5b1 100644
--- a/vendor/github.com/gorilla/websocket/server.go
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -154,8 +154,8 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
 	}
 
 	challengeKey := r.Header.Get("Sec-Websocket-Key")
-	if challengeKey == "" {
-		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
+	if !isValidChallengeKey(challengeKey) {
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be a base64-encoded value that decodes to 16 bytes")
 	}
 
 	subprotocol := u.selectSubprotocol(r, responseHeader)
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
index 7bf2f66c6747d5fd1d08e652e7df747fb229f5bd..31a5dee6462bd456a4bc2332dc5eee250bab7bcc 100644
--- a/vendor/github.com/gorilla/websocket/util.go
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -281,3 +281,18 @@ headers:
 	}
 	return result
 }
+
+// isValidChallengeKey reports whether the argument meets the RFC 6455
+// requirements for a challenge key.
+func isValidChallengeKey(s string) bool {
+	// From RFC6455:
+	//
+	// A |Sec-WebSocket-Key| header field with a base64-encoded (see
+	// Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in
+	// length.
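+	//
+	// For example, the handshake key from section 1.3 of RFC 6455,
+	// "dGhlIHNhbXBsZSBub25jZQ==", decodes to the 16-byte string
+	// "the sample nonce" and is therefore valid.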
+
+	if s == "" {
+		return false
+	}
+	decoded, err := base64.StdEncoding.DecodeString(s)
+	return err == nil && len(decoded) == 16
+}
diff --git a/vendor/github.com/grafana/regexp/.gitignore b/vendor/github.com/grafana/regexp/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..66fd13c903cac02eb9657cd53fb227823484401d
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/grafana/regexp/LICENSE b/vendor/github.com/grafana/regexp/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6a66aea5eafe0ca6a688840c47219556c552488e
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grafana/regexp/README.md b/vendor/github.com/grafana/regexp/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..756e60dcfdb8292d43d7912cf54b8c97990f4f15
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/README.md
@@ -0,0 +1,12 @@
+# Grafana Go regexp package
+
+This repo is a fork of the upstream Go `regexp` package, with some code optimisations to make it run faster.
+
+All the optimisations have been submitted upstream, but not yet merged.
+
+All semantics are the same, and the optimised code passes all tests from upstream.
+
+The `main` branch is non-optimised: switch over to [`speedup`](https://github.com/grafana/regexp/tree/speedup) branch for the improved code.
+
+## Benchmarks:
+
+![image](https://user-images.githubusercontent.com/8125524/152182951-856549ed-6044-4285-b799-69b31f598e32.png)
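+
+## Usage
+
+The API is identical to the upstream `regexp` package, so the fork works as a
+drop-in replacement: only the import path changes (a minimal sketch; the
+pattern and input are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	regexp "github.com/grafana/regexp"
+)
+
+func main() {
+	re := regexp.MustCompile(`^metric_[a-z]+$`)
+	fmt.Println(re.MatchString("metric_requests")) // prints: true
+}
+```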
diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c37c66a80c787e0ed1ea4ccc886eecb9df05d2d
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/backtrack.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// backtrack is a regular expression search with submatch
+// tracking for small regular expressions and texts. It allocates
+// a bit vector with (length of input) * (length of prog) bits,
+// to make sure it never explores the same (character position, instruction)
+// state multiple times. This limits the search to run in time linear in
+// the length of the text.
+//
+// backtrack is a fast replacement for the NFA code on small
+// regexps when onepass cannot be used.
+
+package regexp
+
+import (
+	"regexp/syntax"
+	"sync"
+)
+
+// A job is an entry on the backtracker's job stack. It holds
+// the instruction pc and the position in the input.
+type job struct {
+	pc  uint32
+	arg bool
+	pos int
+}
+
+const (
+	visitedBits        = 32
+	maxBacktrackProg   = 500        // len(prog.Inst) <= max
+	maxBacktrackVector = 256 * 1024 // bit vector size <= max (bits)
+)
+
+// bitState holds state for the backtracker.
+type bitState struct {
+	end      int
+	cap      []int
+	matchcap []int
+	jobs     []job
+	visited  []uint32
+
+	inputs inputs
+}
+
+var bitStatePool sync.Pool
+
+func newBitState() *bitState {
+	b, ok := bitStatePool.Get().(*bitState)
+	if !ok {
+		b = new(bitState)
+	}
+	return b
+}
+
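+// freeBitState returns b to the pool, first dropping its reference to the
+// caller's input so that pooled states do not pin those buffers.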
+func freeBitState(b *bitState) {
+	b.inputs.clear()
+	bitStatePool.Put(b)
+}
+
+// maxBitStateLen returns the maximum length of a string to search with
+// the backtracker using prog.
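+// For example, with the 256 Kbit visited vector and a program at the
+// 500-instruction limit, inputs up to 262144/500 = 524 bytes qualify.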
+func maxBitStateLen(prog *syntax.Prog) int {
+	if !shouldBacktrack(prog) {
+		return 0
+	}
+	return maxBacktrackVector / len(prog.Inst)
+}
+
+// shouldBacktrack reports whether the program is short enough
+// for the backtracker to run.
+func shouldBacktrack(prog *syntax.Prog) bool {
+	return len(prog.Inst) <= maxBacktrackProg
+}
+
+// reset resets the state of the backtracker.
+// end is the end position in the input.
+// ncap is the number of captures.
+func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) {
+	b.end = end
+
+	if cap(b.jobs) == 0 {
+		b.jobs = make([]job, 0, 256)
+	} else {
+		b.jobs = b.jobs[:0]
+	}
+
+	visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits
+	if cap(b.visited) < visitedSize {
+		b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits)
+	} else {
+		b.visited = b.visited[:visitedSize]
+		clear(b.visited) // set to 0
+	}
+
+	if cap(b.cap) < ncap {
+		b.cap = make([]int, ncap)
+	} else {
+		b.cap = b.cap[:ncap]
+	}
+	for i := range b.cap {
+		b.cap[i] = -1
+	}
+
+	if cap(b.matchcap) < ncap {
+		b.matchcap = make([]int, ncap)
+	} else {
+		b.matchcap = b.matchcap[:ncap]
+	}
+	for i := range b.matchcap {
+		b.matchcap[i] = -1
+	}
+}
+
+// shouldVisit reports whether the combination of (pc, pos) has not
+// been visited yet.
+func (b *bitState) shouldVisit(pc uint32, pos int) bool {
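+	// Flatten the (pc, pos) pair into one bit index: each instruction
+	// gets end+1 slots, one per input position (including the position
+	// just past the last byte).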
+	n := uint(int(pc)*(b.end+1) + pos)
+	if b.visited[n/visitedBits]&(1<<(n&(visitedBits-1))) != 0 {
+		return false
+	}
+	b.visited[n/visitedBits] |= 1 << (n & (visitedBits - 1))
+	return true
+}
+
+// push pushes (pc, pos, arg) onto the job stack if it should be
+// visited.
+func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) {
+	// Only check shouldVisit when arg is false.
+	// When arg is true, we are continuing a previous visit.
+	if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) {
+		b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos})
+	}
+}
+
+// tryBacktrack runs a backtracking search starting at pos.
+func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
+	longest := re.longest
+
+	b.push(re, pc, pos, false)
+	for len(b.jobs) > 0 {
+		l := len(b.jobs) - 1
+		// Pop job off the stack.
+		pc := b.jobs[l].pc
+		pos := b.jobs[l].pos
+		arg := b.jobs[l].arg
+		b.jobs = b.jobs[:l]
+
+		// Optimization: rather than push and pop,
+		// code that is going to Push and continue
+		// the loop simply updates pc, pos, and arg
+		// and jumps to CheckAndLoop. We have to
+		// do the ShouldVisit check that Push
+		// would have, but we avoid the stack
+		// manipulation.
+		goto Skip
+	CheckAndLoop:
+		if !b.shouldVisit(pc, pos) {
+			continue
+		}
+	Skip:
+
+		inst := &re.prog.Inst[pc]
+
+		switch inst.Op {
+		default:
+			panic("bad inst")
+		case syntax.InstFail:
+			panic("unexpected InstFail")
+		case syntax.InstAlt:
+			// Cannot just
+			//   b.push(inst.Out, pos, false)
+			//   b.push(inst.Arg, pos, false)
+			// If during the processing of inst.Out, we encounter
+			// inst.Arg via another path, we want to process it then.
+			// Pushing it here will inhibit that. Instead, re-push
+			// inst with arg==true as a reminder to push inst.Arg out
+			// later.
+			if arg {
+				// Finished inst.Out; try inst.Arg.
+				arg = false
+				pc = inst.Arg
+				goto CheckAndLoop
+			} else {
+				b.push(re, pc, pos, true)
+				pc = inst.Out
+				goto CheckAndLoop
+			}
+
+		case syntax.InstAltMatch:
+			// One opcode consumes runes; the other leads to match.
+			switch re.prog.Inst[inst.Out].Op {
+			case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
+				// inst.Arg is the match.
+				b.push(re, inst.Arg, pos, false)
+				pc = inst.Arg
+				pos = b.end
+				goto CheckAndLoop
+			}
+			// inst.Out is the match - non-greedy
+			b.push(re, inst.Out, b.end, false)
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstRune:
+			r, width := i.step(pos)
+			if !inst.MatchRune(r) {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstRune1:
+			r, width := i.step(pos)
+			if r != inst.Rune[0] {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstRuneAnyNotNL:
+			r, width := i.step(pos)
+			if r == '\n' || r == endOfText {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstRuneAny:
+			r, width := i.step(pos)
+			if r == endOfText {
+				continue
+			}
+			pos += width
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstCapture:
+			if arg {
+				// Finished inst.Out; restore the old value.
+				b.cap[inst.Arg] = pos
+				continue
+			} else {
+				if inst.Arg < uint32(len(b.cap)) {
+					// Capture pos to register, but save old value.
+					b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done.
+					b.cap[inst.Arg] = pos
+				}
+				pc = inst.Out
+				goto CheckAndLoop
+			}
+
+		case syntax.InstEmptyWidth:
+			flag := i.context(pos)
+			if !flag.match(syntax.EmptyOp(inst.Arg)) {
+				continue
+			}
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstNop:
+			pc = inst.Out
+			goto CheckAndLoop
+
+		case syntax.InstMatch:
+			// We found a match. If the caller doesn't care
+			// where the match is, no point going further.
+			if len(b.cap) == 0 {
+				return true
+			}
+
+			// Record best match so far.
+			// Only need to check end point, because this entire
+			// call is only considering one start position.
+			if len(b.cap) > 1 {
+				b.cap[1] = pos
+			}
+			if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) {
+				copy(b.matchcap, b.cap)
+			}
+
+			// If going for first match, we're done.
+			if !longest {
+				return true
+			}
+
+			// If we used the entire text, a longer match is not possible.
+			if pos == b.end {
+				return true
+			}
+
+			// Otherwise, continue on in hope of a longer match.
+			continue
+		}
+	}
+
+	return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0
+}
+
+// backtrack runs a backtracking search of prog on the input starting at pos.
+func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int {
+	startCond := re.cond
+	if startCond == ^syntax.EmptyOp(0) { // impossible
+		return nil
+	}
+	if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
+		// Anchored match, past beginning of text.
+		return nil
+	}
+
+	b := newBitState()
+	i, end := b.inputs.init(nil, ib, is)
+	b.reset(re.prog, end, ncap)
+
+	// Anchored search must start at the beginning of the input
+	if startCond&syntax.EmptyBeginText != 0 {
+		if len(b.cap) > 0 {
+			b.cap[0] = pos
+		}
+		if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
+			freeBitState(b)
+			return nil
+		}
+	} else {
+
+		// Unanchored search, starting from each possible text position.
+		// Notice that we have to try the empty string at the end of
+		// the text, so the loop condition is pos <= end, not pos < end.
+		// This looks like it's quadratic in the size of the text,
+		// but we are not clearing visited between calls to tryBacktrack,
+		// so no work is duplicated and it ends up still being linear.
+		width := -1
+		for ; pos <= end && width != 0; pos += width {
+			if len(re.prefix) > 0 {
+				// Match requires literal prefix; fast search for it.
+				advance := i.index(re, pos)
+				if advance < 0 {
+					freeBitState(b)
+					return nil
+				}
+				pos += advance
+			}
+
+			if len(b.cap) > 0 {
+				b.cap[0] = pos
+			}
+			if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
+				// Match must be leftmost; done.
+				goto Match
+			}
+			_, width = i.step(pos)
+		}
+		freeBitState(b)
+		return nil
+	}
+
+Match:
+	dstCap = append(dstCap, b.matchcap...)
+	freeBitState(b)
+	return dstCap
+}
diff --git a/vendor/github.com/grafana/regexp/exec.go b/vendor/github.com/grafana/regexp/exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..3fc4b684febd6034230df4a4c00a082961c7d0c9
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/exec.go
@@ -0,0 +1,554 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regexp
+
+import (
+	"io"
+	"regexp/syntax"
+	"sync"
+)
+
+// A queue is a 'sparse array' holding pending threads of execution.
+// See https://research.swtch.com/2008/03/using-uninitialized-memory-for-fun-and.html
+type queue struct {
+	sparse []uint32
+	dense  []entry
+}
+
+// An entry is an entry on a queue.
+// It holds both the instruction pc and the actual thread.
+// Some queue entries are just placeholders so that the machine
+// knows it has considered that pc. Such entries have t == nil.
+type entry struct {
+	pc uint32
+	t  *thread
+}
+
+// A thread is the state of a single path through the machine:
+// an instruction and a corresponding capture array.
+// See https://swtch.com/~rsc/regexp/regexp2.html
+type thread struct {
+	inst *syntax.Inst
+	cap  []int
+}
+
+// A machine holds all the state during an NFA simulation for p.
+type machine struct {
+	re       *Regexp      // corresponding Regexp
+	p        *syntax.Prog // compiled program
+	q0, q1   queue        // two queues for runq, nextq
+	pool     []*thread    // pool of available threads
+	matched  bool         // whether a match was found
+	matchcap []int        // capture information for the match
+
+	inputs inputs
+}
+
+type inputs struct {
+	// cached inputs, to avoid allocation
+	bytes  inputBytes
+	string inputString
+	reader inputReader
+}
+
+func (i *inputs) newBytes(b []byte) input {
+	i.bytes.str = b
+	return &i.bytes
+}
+
+func (i *inputs) newString(s string) input {
+	i.string.str = s
+	return &i.string
+}
+
+func (i *inputs) newReader(r io.RuneReader) input {
+	i.reader.r = r
+	i.reader.atEOT = false
+	i.reader.pos = 0
+	return &i.reader
+}
+
+func (i *inputs) clear() {
+	// We need to clear 1 of these.
+	// Avoid the expense of clearing the others (pointer write barrier).
+	if i.bytes.str != nil {
+		i.bytes.str = nil
+	} else if i.reader.r != nil {
+		i.reader.r = nil
+	} else {
+		i.string.str = ""
+	}
+}
+
+func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) {
+	if r != nil {
+		return i.newReader(r), 0
+	}
+	if b != nil {
+		return i.newBytes(b), len(b)
+	}
+	return i.newString(s), len(s)
+}
+
+func (m *machine) init(ncap int) {
+	for _, t := range m.pool {
+		t.cap = t.cap[:ncap]
+	}
+	m.matchcap = m.matchcap[:ncap]
+}
+
+// alloc allocates a new thread with the given instruction.
+// It uses the free pool if possible.
+func (m *machine) alloc(i *syntax.Inst) *thread {
+	var t *thread
+	if n := len(m.pool); n > 0 {
+		t = m.pool[n-1]
+		m.pool = m.pool[:n-1]
+	} else {
+		t = new(thread)
+		t.cap = make([]int, len(m.matchcap), cap(m.matchcap))
+	}
+	t.inst = i
+	return t
+}
+
+// A lazyFlag is a lazily-evaluated syntax.EmptyOp,
+// for checking zero-width flags like ^ $ \A \z \B \b.
+// It records the pair of relevant runes and does not
+// determine the implied flags until absolutely necessary
+// (most of the time, that means never).
+type lazyFlag uint64
+
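+// newLazyFlag packs the rune before the position (r1) into the high 32
+// bits and the rune after it (r2) into the low 32 bits; endOfText (-1)
+// marks either boundary of the input.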
+func newLazyFlag(r1, r2 rune) lazyFlag {
+	return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2)))
+}
+
+func (f lazyFlag) match(op syntax.EmptyOp) bool {
+	if op == 0 {
+		return true
+	}
+	r1 := rune(f >> 32)
+	if op&syntax.EmptyBeginLine != 0 {
+		if r1 != '\n' && r1 >= 0 {
+			return false
+		}
+		op &^= syntax.EmptyBeginLine
+	}
+	if op&syntax.EmptyBeginText != 0 {
+		if r1 >= 0 {
+			return false
+		}
+		op &^= syntax.EmptyBeginText
+	}
+	if op == 0 {
+		return true
+	}
+	r2 := rune(f)
+	if op&syntax.EmptyEndLine != 0 {
+		if r2 != '\n' && r2 >= 0 {
+			return false
+		}
+		op &^= syntax.EmptyEndLine
+	}
+	if op&syntax.EmptyEndText != 0 {
+		if r2 >= 0 {
+			return false
+		}
+		op &^= syntax.EmptyEndText
+	}
+	if op == 0 {
+		return true
+	}
+	if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) {
+		op &^= syntax.EmptyWordBoundary
+	} else {
+		op &^= syntax.EmptyNoWordBoundary
+	}
+	return op == 0
+}
+
+// match runs the machine over the input starting at pos.
+// It reports whether a match was found.
+// If so, m.matchcap holds the submatch information.
+func (m *machine) match(i input, pos int) bool {
+	startCond := m.re.cond
+	if startCond == ^syntax.EmptyOp(0) { // impossible
+		return false
+	}
+	m.matched = false
+	for i := range m.matchcap {
+		m.matchcap[i] = -1
+	}
+	runq, nextq := &m.q0, &m.q1
+	r, r1 := endOfText, endOfText
+	width, width1 := 0, 0
+	r, width = i.step(pos)
+	if r != endOfText {
+		r1, width1 = i.step(pos + width)
+	}
+	var flag lazyFlag
+	if pos == 0 {
+		flag = newLazyFlag(-1, r)
+	} else {
+		flag = i.context(pos)
+	}
+	for {
+		if len(runq.dense) == 0 {
+			if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
+				// Anchored match, past beginning of text.
+				break
+			}
+			if m.matched {
+				// Have match; finished exploring alternatives.
+				break
+			}
+			if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() {
+				// Match requires literal prefix; fast search for it.
+				advance := i.index(m.re, pos)
+				if advance < 0 {
+					break
+				}
+				pos += advance
+				r, width = i.step(pos)
+				r1, width1 = i.step(pos + width)
+			}
+		}
+		if !m.matched {
+			if len(m.matchcap) > 0 {
+				m.matchcap[0] = pos
+			}
+			m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil)
+		}
+		flag = newLazyFlag(r, r1)
+		m.step(runq, nextq, pos, pos+width, r, &flag)
+		if width == 0 {
+			break
+		}
+		if len(m.matchcap) == 0 && m.matched {
+			// Found a match and not paying attention
+			// to where it is, so any match will do.
+			break
+		}
+		pos += width
+		r, width = r1, width1
+		if r != endOfText {
+			r1, width1 = i.step(pos + width)
+		}
+		runq, nextq = nextq, runq
+	}
+	m.clear(nextq)
+	return m.matched
+}
+
+// clear frees all threads on the thread queue.
+func (m *machine) clear(q *queue) {
+	for _, d := range q.dense {
+		if d.t != nil {
+			m.pool = append(m.pool, d.t)
+		}
+	}
+	q.dense = q.dense[:0]
+}
+
+// step executes one step of the machine, running each of the threads
+// on runq and appending new threads to nextq.
+// The step processes the rune c (which may be endOfText),
+// which starts at position pos and ends at nextPos.
+// nextCond gives the setting for the empty-width flags after c.
+func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) {
+	longest := m.re.longest
+	for j := 0; j < len(runq.dense); j++ {
+		d := &runq.dense[j]
+		t := d.t
+		if t == nil {
+			continue
+		}
+		if longest && m.matched && len(t.cap) > 0 && m.matchcap[0] < t.cap[0] {
+			m.pool = append(m.pool, t)
+			continue
+		}
+		i := t.inst
+		add := false
+		switch i.Op {
+		default:
+			panic("bad inst")
+
+		case syntax.InstMatch:
+			if len(t.cap) > 0 && (!longest || !m.matched || m.matchcap[1] < pos) {
+				t.cap[1] = pos
+				copy(m.matchcap, t.cap)
+			}
+			if !longest {
+				// First-match mode: cut off all lower-priority threads.
+				for _, d := range runq.dense[j+1:] {
+					if d.t != nil {
+						m.pool = append(m.pool, d.t)
+					}
+				}
+				runq.dense = runq.dense[:0]
+			}
+			m.matched = true
+
+		case syntax.InstRune:
+			add = i.MatchRune(c)
+		case syntax.InstRune1:
+			add = c == i.Rune[0]
+		case syntax.InstRuneAny:
+			add = true
+		case syntax.InstRuneAnyNotNL:
+			add = c != '\n'
+		}
+		if add {
+			t = m.add(nextq, i.Out, nextPos, t.cap, nextCond, t)
+		}
+		if t != nil {
+			m.pool = append(m.pool, t)
+		}
+	}
+	runq.dense = runq.dense[:0]
+}
+
+// add adds an entry to q for pc, unless the q already has such an entry.
+// It also recursively adds an entry for all instructions reachable from pc by following
+// empty-width conditions satisfied by cond.  pos gives the current position
+// in the input.
+func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread {
+Again:
+	if pc == 0 {
+		return t
+	}
+	if j := q.sparse[pc]; j < uint32(len(q.dense)) && q.dense[j].pc == pc {
+		return t
+	}
+
+	j := len(q.dense)
+	q.dense = q.dense[:j+1]
+	d := &q.dense[j]
+	d.t = nil
+	d.pc = pc
+	q.sparse[pc] = uint32(j)
+
+	i := &m.p.Inst[pc]
+	switch i.Op {
+	default:
+		panic("unhandled")
+	case syntax.InstFail:
+		// nothing
+	case syntax.InstAlt, syntax.InstAltMatch:
+		t = m.add(q, i.Out, pos, cap, cond, t)
+		pc = i.Arg
+		goto Again
+	case syntax.InstEmptyWidth:
+		if cond.match(syntax.EmptyOp(i.Arg)) {
+			pc = i.Out
+			goto Again
+		}
+	case syntax.InstNop:
+		pc = i.Out
+		goto Again
+	case syntax.InstCapture:
+		if int(i.Arg) < len(cap) {
+			opos := cap[i.Arg]
+			cap[i.Arg] = pos
+			m.add(q, i.Out, pos, cap, cond, nil)
+			cap[i.Arg] = opos
+		} else {
+			pc = i.Out
+			goto Again
+		}
+	case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
+		if t == nil {
+			t = m.alloc(i)
+		} else {
+			t.inst = i
+		}
+		if len(cap) > 0 && &t.cap[0] != &cap[0] {
+			copy(t.cap, cap)
+		}
+		d.t = t
+		t = nil
+	}
+	return t
+}
+
+type onePassMachine struct {
+	inputs   inputs
+	matchcap []int
+}
+
+var onePassPool sync.Pool
+
+func newOnePassMachine() *onePassMachine {
+	m, ok := onePassPool.Get().(*onePassMachine)
+	if !ok {
+		m = new(onePassMachine)
+	}
+	return m
+}
+
+func freeOnePassMachine(m *onePassMachine) {
+	m.inputs.clear()
+	onePassPool.Put(m)
+}
+
+// doOnePass implements r.doExecute using the one-pass execution engine.
+func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int {
+	startCond := re.cond
+	if startCond == ^syntax.EmptyOp(0) { // impossible
+		return nil
+	}
+
+	m := newOnePassMachine()
+	if cap(m.matchcap) < ncap {
+		m.matchcap = make([]int, ncap)
+	} else {
+		m.matchcap = m.matchcap[:ncap]
+	}
+
+	matched := false
+	for i := range m.matchcap {
+		m.matchcap[i] = -1
+	}
+
+	i, _ := m.inputs.init(ir, ib, is)
+
+	r, r1 := endOfText, endOfText
+	width, width1 := 0, 0
+	r, width = i.step(pos)
+	if r != endOfText {
+		r1, width1 = i.step(pos + width)
+	}
+	var flag lazyFlag
+	if pos == 0 {
+		flag = newLazyFlag(-1, r)
+	} else {
+		flag = i.context(pos)
+	}
+	pc := re.onepass.Start
+	inst := &re.onepass.Inst[pc]
+	// If there is a simple literal prefix, skip over it.
+	if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) &&
+		len(re.prefix) > 0 && i.canCheckPrefix() {
+		// Match requires literal prefix; fast search for it.
+		if !i.hasPrefix(re) {
+			goto Return
+		}
+		pos += len(re.prefix)
+		r, width = i.step(pos)
+		r1, width1 = i.step(pos + width)
+		flag = i.context(pos)
+		pc = int(re.prefixEnd)
+	}
+	for {
+		inst = &re.onepass.Inst[pc]
+		pc = int(inst.Out)
+		switch inst.Op {
+		default:
+			panic("bad inst")
+		case syntax.InstMatch:
+			matched = true
+			if len(m.matchcap) > 0 {
+				m.matchcap[0] = 0
+				m.matchcap[1] = pos
+			}
+			goto Return
+		case syntax.InstRune:
+			if !inst.MatchRune(r) {
+				goto Return
+			}
+		case syntax.InstRune1:
+			if r != inst.Rune[0] {
+				goto Return
+			}
+		case syntax.InstRuneAny:
+			// Nothing
+		case syntax.InstRuneAnyNotNL:
+			if r == '\n' {
+				goto Return
+			}
+		// peek at the input rune to see which branch of the Alt to take
+		case syntax.InstAlt, syntax.InstAltMatch:
+			pc = int(onePassNext(inst, r))
+			continue
+		case syntax.InstFail:
+			goto Return
+		case syntax.InstNop:
+			continue
+		case syntax.InstEmptyWidth:
+			if !flag.match(syntax.EmptyOp(inst.Arg)) {
+				goto Return
+			}
+			continue
+		case syntax.InstCapture:
+			if int(inst.Arg) < len(m.matchcap) {
+				m.matchcap[inst.Arg] = pos
+			}
+			continue
+		}
+		if width == 0 {
+			break
+		}
+		flag = newLazyFlag(r, r1)
+		pos += width
+		r, width = r1, width1
+		if r != endOfText {
+			r1, width1 = i.step(pos + width)
+		}
+	}
+
+Return:
+	if !matched {
+		freeOnePassMachine(m)
+		return nil
+	}
+
+	dstCap = append(dstCap, m.matchcap...)
+	freeOnePassMachine(m)
+	return dstCap
+}
+
+// doMatch reports whether either r, b, or s matches the regexp.
+func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool {
+	return re.doExecute(r, b, s, 0, 0, nil) != nil
+}
+
+// doExecute finds the leftmost match in the input, appends the position
+// of its subexpressions to dstCap and returns dstCap.
+//
+// nil is returned if no matches are found and non-nil if matches are found.
+func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int {
+	if dstCap == nil {
+		// Make sure 'return dstCap' is non-nil.
+		dstCap = arrayNoInts[:0:0]
+	}
+
+	if r == nil && len(b)+len(s) < re.minInputLen {
+		return nil
+	}
+
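+	// Dispatch to the cheapest engine that can handle this call: the
+	// one-pass machine when the compiled program qualifies, the
+	// backtracker for sufficiently short inputs, and the full NFA
+	// simulation otherwise.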
+	if re.onepass != nil {
+		return re.doOnePass(r, b, s, pos, ncap, dstCap)
+	}
+	if r == nil && len(b)+len(s) < re.maxBitStateLen {
+		return re.backtrack(b, s, pos, ncap, dstCap)
+	}
+
+	m := re.get()
+	i, _ := m.inputs.init(r, b, s)
+
+	m.init(ncap)
+	if !m.match(i, pos) {
+		re.put(m)
+		return nil
+	}
+
+	dstCap = append(dstCap, m.matchcap...)
+	re.put(m)
+	return dstCap
+}
+
+// arrayNoInts is returned by doExecute when a nil dstCap is passed
+// to it with ncap=0, so that the returned slice is non-nil without
+// allocating.
+var arrayNoInts [0]int
diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go
new file mode 100644
index 0000000000000000000000000000000000000000..53cbd958394120f513e75a0790c65e619b87c139
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/onepass.go
@@ -0,0 +1,500 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regexp
+
+import (
+	"regexp/syntax"
+	"slices"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// "One-pass" regexp execution.
+// Some regexps can be analyzed to determine that they never need
+// backtracking: they are guaranteed to run in one pass over the string
+// without bothering to save all the usual NFA state.
+// Detect those and execute them more quickly.
+
+// A onePassProg is a compiled one-pass regular expression program.
+// It is the same as syntax.Prog except for the use of onePassInst.
+type onePassProg struct {
+	Inst   []onePassInst
+	Start  int // index of start instruction
+	NumCap int // number of InstCapture insts in re
+}
+
+// A onePassInst is a single instruction in a one-pass regular expression program.
+// It is the same as syntax.Inst except for the new 'Next' field.
+type onePassInst struct {
+	syntax.Inst
+	Next []uint32
+}
+
+// onePassPrefix returns a literal string that all matches for the
+// regexp must start with. Complete is true if the prefix
+// is the entire match. Pc is the index of the last rune instruction
+// in the string. The onePassPrefix skips over the mandatory
+// EmptyBeginText.
+func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) {
+	i := &p.Inst[p.Start]
+	if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 {
+		return "", i.Op == syntax.InstMatch, uint32(p.Start)
+	}
+	pc = i.Out
+	i = &p.Inst[pc]
+	for i.Op == syntax.InstNop {
+		pc = i.Out
+		i = &p.Inst[pc]
+	}
+	// Avoid allocation of buffer if prefix is empty.
+	if iop(i) != syntax.InstRune || len(i.Rune) != 1 {
+		return "", i.Op == syntax.InstMatch, uint32(p.Start)
+	}
+
+	// Have prefix; gather characters.
+	var buf strings.Builder
+	for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError {
+		buf.WriteRune(i.Rune[0])
+		pc, i = i.Out, &p.Inst[i.Out]
+	}
+	if i.Op == syntax.InstEmptyWidth &&
+		syntax.EmptyOp(i.Arg)&syntax.EmptyEndText != 0 &&
+		p.Inst[i.Out].Op == syntax.InstMatch {
+		complete = true
+	}
+	return buf.String(), complete, pc
+}
+
+// onePassNext selects the next actionable state of the prog, based on the input character.
+// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine.
+// One of the alternates may ultimately lead without input to end of line. If the instruction
+// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next.
+func onePassNext(i *onePassInst, r rune) uint32 {
+	next := i.MatchRunePos(r)
+	if next >= 0 {
+		return i.Next[next]
+	}
+	if i.Op == syntax.InstAltMatch {
+		return i.Out
+	}
+	return 0
+}
+
+func iop(i *syntax.Inst) syntax.InstOp {
+	op := i.Op
+	switch op {
+	case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
+		op = syntax.InstRune
+	}
+	return op
+}
+
+// A sparse-array implementation is used as a queueOnePass.
+type queueOnePass struct {
+	sparse          []uint32
+	dense           []uint32
+	size, nextIndex uint32
+}
+
+func (q *queueOnePass) empty() bool {
+	return q.nextIndex >= q.size
+}
+
+func (q *queueOnePass) next() (n uint32) {
+	n = q.dense[q.nextIndex]
+	q.nextIndex++
+	return
+}
+
+func (q *queueOnePass) clear() {
+	q.size = 0
+	q.nextIndex = 0
+}
+
+func (q *queueOnePass) contains(u uint32) bool {
+	if u >= uint32(len(q.sparse)) {
+		return false
+	}
+	return q.sparse[u] < q.size && q.dense[q.sparse[u]] == u
+}
+
+func (q *queueOnePass) insert(u uint32) {
+	if !q.contains(u) {
+		q.insertNew(u)
+	}
+}
+
+func (q *queueOnePass) insertNew(u uint32) {
+	if u >= uint32(len(q.sparse)) {
+		return
+	}
+	q.sparse[u] = q.size
+	q.dense[q.size] = u
+	q.size++
+}
+
+func newQueue(size int) (q *queueOnePass) {
+	return &queueOnePass{
+		sparse: make([]uint32, size),
+		dense:  make([]uint32, size),
+	}
+}
+
+// mergeRuneSets merges two non-intersecting runesets, and returns the merged result,
+// and a NextIp array. The idea is that if a rune matches the OnePassRunes at index
+// i, NextIp[i/2] is the target. If the input sets intersect, an empty runeset and a
+// NextIp array with the single element mergeFailed are returned.
+// The code assumes that both inputs contain ordered and non-intersecting rune pairs.
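+//
+// For example, merging left runes {'a','c'} (leftPC 1) with right runes
+// {'x','z'} (rightPC 2) yields merged {'a','c','x','z'} and next {1, 2}.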
+const mergeFailed = uint32(0xffffffff)
+
+var (
+	noRune = []rune{}
+	noNext = []uint32{mergeFailed}
+)
+
+func mergeRuneSets(leftRunes, rightRunes *[]rune, leftPC, rightPC uint32) ([]rune, []uint32) {
+	leftLen := len(*leftRunes)
+	rightLen := len(*rightRunes)
+	if leftLen&0x1 != 0 || rightLen&0x1 != 0 {
+		panic("mergeRuneSets odd length []rune")
+	}
+	var (
+		lx, rx int
+	)
+	merged := make([]rune, 0)
+	next := make([]uint32, 0)
+	ok := true
+	defer func() {
+		if !ok {
+			merged = nil
+			next = nil
+		}
+	}()
+
+	ix := -1
+	extend := func(newLow *int, newArray *[]rune, pc uint32) bool {
+		if ix > 0 && (*newArray)[*newLow] <= merged[ix] {
+			return false
+		}
+		merged = append(merged, (*newArray)[*newLow], (*newArray)[*newLow+1])
+		*newLow += 2
+		ix += 2
+		next = append(next, pc)
+		return true
+	}
+
+	for lx < leftLen || rx < rightLen {
+		switch {
+		case rx >= rightLen:
+			ok = extend(&lx, leftRunes, leftPC)
+		case lx >= leftLen:
+			ok = extend(&rx, rightRunes, rightPC)
+		case (*rightRunes)[rx] < (*leftRunes)[lx]:
+			ok = extend(&rx, rightRunes, rightPC)
+		default:
+			ok = extend(&lx, leftRunes, leftPC)
+		}
+		if !ok {
+			return noRune, noNext
+		}
+	}
+	return merged, next
+}
+
+// cleanupOnePass drops working memory, and restores certain shortcut instructions.
+func cleanupOnePass(prog *onePassProg, original *syntax.Prog) {
+	for ix, instOriginal := range original.Inst {
+		switch instOriginal.Op {
+		case syntax.InstAlt, syntax.InstAltMatch, syntax.InstRune:
+		case syntax.InstCapture, syntax.InstEmptyWidth, syntax.InstNop, syntax.InstMatch, syntax.InstFail:
+			prog.Inst[ix].Next = nil
+		case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
+			prog.Inst[ix].Next = nil
+			prog.Inst[ix] = onePassInst{Inst: instOriginal}
+		}
+	}
+}
+
+// onePassCopy creates a copy of the original Prog, as we'll be modifying it.
+func onePassCopy(prog *syntax.Prog) *onePassProg {
+	p := &onePassProg{
+		Start:  prog.Start,
+		NumCap: prog.NumCap,
+		Inst:   make([]onePassInst, len(prog.Inst)),
+	}
+	for i, inst := range prog.Inst {
+		p.Inst[i] = onePassInst{Inst: inst}
+	}
+
+	// rewrites one or more common Prog constructs that enable some otherwise
+	// non-onepass Progs to be onepass. A:BC (for example) means an InstAlt at
+	// ip A that points to ips B & C.
+	// A:BC + B:DA => A:BC + B:DC
+	// A:BC + B:DC => A:DC + B:DC
+	for pc := range p.Inst {
+		switch p.Inst[pc].Op {
+		default:
+			continue
+		case syntax.InstAlt, syntax.InstAltMatch:
+			// A:Bx + B:Ay
+			p_A_Other := &p.Inst[pc].Out
+			p_A_Alt := &p.Inst[pc].Arg
+			// make sure a target is another Alt
+			instAlt := p.Inst[*p_A_Alt]
+			if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
+				p_A_Alt, p_A_Other = p_A_Other, p_A_Alt
+				instAlt = p.Inst[*p_A_Alt]
+				if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
+					continue
+				}
+			}
+			instOther := p.Inst[*p_A_Other]
+			// Analyzing both legs pointing to Alts is for another day
+			if instOther.Op == syntax.InstAlt || instOther.Op == syntax.InstAltMatch {
+				// too complicated
+				continue
+			}
+			// simple empty transition loop
+			// A:BC + B:DA => A:BC + B:DC
+			p_B_Alt := &p.Inst[*p_A_Alt].Out
+			p_B_Other := &p.Inst[*p_A_Alt].Arg
+			patch := false
+			if instAlt.Out == uint32(pc) {
+				patch = true
+			} else if instAlt.Arg == uint32(pc) {
+				patch = true
+				p_B_Alt, p_B_Other = p_B_Other, p_B_Alt
+			}
+			if patch {
+				*p_B_Alt = *p_A_Other
+			}
+
+			// empty transition to common target
+			// A:BC + B:DC => A:DC + B:DC
+			if *p_A_Other == *p_B_Alt {
+				*p_A_Alt = *p_B_Other
+			}
+		}
+	}
+	return p
+}
+
+var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune}
+var anyRune = []rune{0, unicode.MaxRune}
+
+// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt,
+// the match engine can always tell which branch to take. The routine may modify
+// p if it is turned into a onepass Prog. If it isn't possible for this to be a
+// onepass Prog, nil is returned. makeOnePass is recursive
+// to the size of the Prog.
+func makeOnePass(p *onePassProg) *onePassProg {
+	// If the machine is very long, it's not worth the time to check if we can use one pass.
+	if len(p.Inst) >= 1000 {
+		return nil
+	}
+
+	var (
+		instQueue    = newQueue(len(p.Inst))
+		visitQueue   = newQueue(len(p.Inst))
+		check        func(uint32, []bool) bool
+		onePassRunes = make([][]rune, len(p.Inst))
+	)
+
+	// check that paths from Alt instructions are unambiguous, and rebuild the new
+	// program as a onepass program
+	check = func(pc uint32, m []bool) (ok bool) {
+		ok = true
+		inst := &p.Inst[pc]
+		if visitQueue.contains(pc) {
+			return
+		}
+		visitQueue.insert(pc)
+		switch inst.Op {
+		case syntax.InstAlt, syntax.InstAltMatch:
+			ok = check(inst.Out, m) && check(inst.Arg, m)
+			// check no-input paths to InstMatch
+			matchOut := m[inst.Out]
+			matchArg := m[inst.Arg]
+			if matchOut && matchArg {
+				ok = false
+				break
+			}
+			// Match on empty goes in inst.Out
+			if matchArg {
+				inst.Out, inst.Arg = inst.Arg, inst.Out
+				matchOut, matchArg = matchArg, matchOut
+			}
+			if matchOut {
+				m[pc] = true
+				inst.Op = syntax.InstAltMatch
+			}
+
+			// build a dispatch operator from the two legs of the alt.
+			onePassRunes[pc], inst.Next = mergeRuneSets(
+				&onePassRunes[inst.Out], &onePassRunes[inst.Arg], inst.Out, inst.Arg)
+			if len(inst.Next) > 0 && inst.Next[0] == mergeFailed {
+				ok = false
+				break
+			}
+		case syntax.InstCapture, syntax.InstNop:
+			ok = check(inst.Out, m)
+			m[pc] = m[inst.Out]
+			// pass matching runes back through these no-ops.
+			onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
+			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+			for i := range inst.Next {
+				inst.Next[i] = inst.Out
+			}
+		case syntax.InstEmptyWidth:
+			ok = check(inst.Out, m)
+			m[pc] = m[inst.Out]
+			onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
+			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+			for i := range inst.Next {
+				inst.Next[i] = inst.Out
+			}
+		case syntax.InstMatch, syntax.InstFail:
+			m[pc] = inst.Op == syntax.InstMatch
+		case syntax.InstRune:
+			m[pc] = false
+			if len(inst.Next) > 0 {
+				break
+			}
+			instQueue.insert(inst.Out)
+			if len(inst.Rune) == 0 {
+				onePassRunes[pc] = []rune{}
+				inst.Next = []uint32{inst.Out}
+				break
+			}
+			runes := make([]rune, 0)
+			if len(inst.Rune) == 1 && syntax.Flags(inst.Arg)&syntax.FoldCase != 0 {
+				r0 := inst.Rune[0]
+				runes = append(runes, r0, r0)
+				for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
+					runes = append(runes, r1, r1)
+				}
+				slices.Sort(runes)
+			} else {
+				runes = append(runes, inst.Rune...)
+			}
+			onePassRunes[pc] = runes
+			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+			for i := range inst.Next {
+				inst.Next[i] = inst.Out
+			}
+			inst.Op = syntax.InstRune
+		case syntax.InstRune1:
+			m[pc] = false
+			if len(inst.Next) > 0 {
+				break
+			}
+			instQueue.insert(inst.Out)
+			runes := []rune{}
+			// expand case-folded runes
+			if syntax.Flags(inst.Arg)&syntax.FoldCase != 0 {
+				r0 := inst.Rune[0]
+				runes = append(runes, r0, r0)
+				for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
+					runes = append(runes, r1, r1)
+				}
+				slices.Sort(runes)
+			} else {
+				runes = append(runes, inst.Rune[0], inst.Rune[0])
+			}
+			onePassRunes[pc] = runes
+			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+			for i := range inst.Next {
+				inst.Next[i] = inst.Out
+			}
+			inst.Op = syntax.InstRune
+		case syntax.InstRuneAny:
+			m[pc] = false
+			if len(inst.Next) > 0 {
+				break
+			}
+			instQueue.insert(inst.Out)
+			onePassRunes[pc] = append([]rune{}, anyRune...)
+			inst.Next = []uint32{inst.Out}
+		case syntax.InstRuneAnyNotNL:
+			m[pc] = false
+			if len(inst.Next) > 0 {
+				break
+			}
+			instQueue.insert(inst.Out)
+			onePassRunes[pc] = append([]rune{}, anyRuneNotNL...)
+			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+			for i := range inst.Next {
+				inst.Next[i] = inst.Out
+			}
+		}
+		return
+	}
+
+	instQueue.clear()
+	instQueue.insert(uint32(p.Start))
+	m := make([]bool, len(p.Inst))
+	for !instQueue.empty() {
+		visitQueue.clear()
+		pc := instQueue.next()
+		if !check(pc, m) {
+			p = nil
+			break
+		}
+	}
+	if p != nil {
+		for i := range p.Inst {
+			p.Inst[i].Rune = onePassRunes[i]
+		}
+	}
+	return p
+}
+
+// compileOnePass returns a new *syntax.Prog suitable for onePass execution
+// if the original Prog can be recharacterized as a one-pass regexp program,
+// or nil if the Prog cannot be converted. For a one-pass prog, the
+// fundamental condition that must be true is: at any InstAlt, there must be
+// no ambiguity about which branch to take.
+func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
+	if prog.Start == 0 {
+		return nil
+	}
+	// onepass regexp is anchored
+	if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth ||
+		syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText {
+		return nil
+	}
+	// every instruction leading to InstMatch must be EmptyEndText
+	for _, inst := range prog.Inst {
+		opOut := prog.Inst[inst.Out].Op
+		switch inst.Op {
+		default:
+			if opOut == syntax.InstMatch {
+				return nil
+			}
+		case syntax.InstAlt, syntax.InstAltMatch:
+			if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch {
+				return nil
+			}
+		case syntax.InstEmptyWidth:
+			if opOut == syntax.InstMatch {
+				if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText {
+					continue
+				}
+				return nil
+			}
+		}
+	}
+	// Create a slightly optimized copy of the original Prog
+	// that cleans up some Prog idioms that block valid onepass programs.
+	p = onePassCopy(prog)
+
+	// Check ambiguity at InstAlts and build the onepass Prog if possible.
+	p = makeOnePass(p)
+
+	if p != nil {
+		cleanupOnePass(p, prog)
+	}
+	return p
+}
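+
+// Illustrative examples (an editor's note, not part of the upstream source):
+// `^(?:GET|POST)$` can be made one-pass, since at the alternation the first
+// rune (G vs. P) decides the branch unambiguously, while `^(?:a|ab)$` cannot,
+// because both branches of the alternation can begin with 'a'.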
diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1218ad0e872d00211964a7d17e02288d9db90e9
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/regexp.go
@@ -0,0 +1,1304 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package regexp implements regular expression search.
+//
+// The syntax of the regular expressions accepted is the same
+// general syntax used by Perl, Python, and other languages.
+// More precisely, it is the syntax accepted by RE2 and described at
+// https://golang.org/s/re2syntax, except for \C.
+// For an overview of the syntax, see the [regexp/syntax] package.
+//
+// The regexp implementation provided by this package is
+// guaranteed to run in time linear in the size of the input.
+// (This is a property not guaranteed by most open source
+// implementations of regular expressions.) For more information
+// about this property, see
+//
+//	https://swtch.com/~rsc/regexp/regexp1.html
+//
+// or any book about automata theory.
+//
+// All characters are UTF-8-encoded code points.
+// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence
+// is treated as if it encoded utf8.RuneError (U+FFFD).
+//
+// There are 16 methods of [Regexp] that match a regular expression and identify
+// the matched text. Their names are matched by this regular expression:
+//
+//	Find(All)?(String)?(Submatch)?(Index)?
+//
+// If 'All' is present, the routine matches successive non-overlapping
+// matches of the entire expression. Empty matches abutting a preceding
+// match are ignored. The return value is a slice containing the successive
+// return values of the corresponding non-'All' routine. These routines take
+// an extra integer argument, n. If n >= 0, the function returns at most n
+// matches/submatches; otherwise, it returns all of them.
+//
+// If 'String' is present, the argument is a string; otherwise it is a slice
+// of bytes; return values are adjusted as appropriate.
+//
+// If 'Submatch' is present, the return value is a slice identifying the
+// successive submatches of the expression. Submatches are matches of
+// parenthesized subexpressions (also known as capturing groups) within the
+// regular expression, numbered from left to right in order of opening
+// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 is
+// the match of the first parenthesized subexpression, and so on.
+//
+// If 'Index' is present, matches and submatches are identified by byte index
+// pairs within the input string: result[2*n:2*n+2] identifies the indexes of
+// the nth submatch. The pair for n==0 identifies the match of the entire
+// expression. If 'Index' is not present, the match is identified by the text
+// of the match/submatch. If an index is negative or text is nil, it means that
+// subexpression did not match any string in the input. For 'String' versions
+// an empty string means either no match or an empty match.
+//
+// There is also a subset of the methods that can be applied to text read
+// from a RuneReader:
+//
+//	MatchReader, FindReaderIndex, FindReaderSubmatchIndex
+//
+// This set may grow. Note that regular expression matches may need to
+// examine text beyond the text returned by a match, so the methods that
+// match text from a RuneReader may read arbitrarily far into the input
+// before returning.
+//
+// (There are a few other methods that do not match this pattern.)
+package regexp
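+
+// For example, the naming scheme described above plays out like this
+// (an editor's illustration, not part of the upstream source):
+//
+//	re := MustCompile(`a(b+)`)
+//	re.FindString("zzabbbzz")         // "abbb"
+//	re.FindStringIndex("zzabbbzz")    // []int{2, 6}
+//	re.FindStringSubmatch("zzabbbzz") // []string{"abbb", "bbb"}
+//	re.FindAllString("ab abb zz", -1) // []string{"ab", "abb"}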
+
+import (
+	"bytes"
+	"io"
+	"regexp/syntax"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Regexp is the representation of a compiled regular expression.
+// A Regexp is safe for concurrent use by multiple goroutines,
+// except for configuration methods, such as [Regexp.Longest].
+type Regexp struct {
+	expr           string       // as passed to Compile
+	prog           *syntax.Prog // compiled program
+	onepass        *onePassProg // onepass program or nil
+	numSubexp      int
+	maxBitStateLen int
+	subexpNames    []string
+	prefix         string         // required prefix in unanchored matches
+	prefixBytes    []byte         // prefix, as a []byte
+	prefixRune     rune           // first rune in prefix
+	prefixEnd      uint32         // pc for last rune in prefix
+	mpool          int            // pool for machines
+	matchcap       int            // size of recorded match lengths
+	prefixComplete bool           // prefix is the entire regexp
+	cond           syntax.EmptyOp // empty-width conditions required at start of match
+	minInputLen    int            // minimum length of the input in bytes
+
+	// This field can be modified by the Longest method,
+	// but it is otherwise read-only.
+	longest bool // whether regexp prefers leftmost-longest match
+}
+
+// String returns the source text used to compile the regular expression.
+func (re *Regexp) String() string {
+	return re.expr
+}
+
+// Copy returns a new [Regexp] object copied from re.
+// Calling [Regexp.Longest] on one copy does not affect another.
+//
+// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines,
+// giving each goroutine its own copy helped to avoid lock contention.
+// As of Go 1.12, using Copy is no longer necessary to avoid lock contention.
+// Copy may still be appropriate if the reason for its use is to make
+// two copies with different [Regexp.Longest] settings.
+func (re *Regexp) Copy() *Regexp {
+	re2 := *re
+	return &re2
+}
+
+// Compile parses a regular expression and returns, if successful,
+// a [Regexp] object that can be used to match against text.
+//
+// When matching against text, the regexp returns a match that
+// begins as early as possible in the input (leftmost), and among those
+// it chooses the one that a backtracking search would have found first.
+// This so-called leftmost-first matching is the same semantics
+// that Perl, Python, and other implementations use, although this
+// package implements it without the expense of backtracking.
+// For POSIX leftmost-longest matching, see [CompilePOSIX].
+func Compile(expr string) (*Regexp, error) {
+	return compile(expr, syntax.Perl, false)
+}
+
+// CompilePOSIX is like [Compile] but restricts the regular expression
+// to POSIX ERE (egrep) syntax and changes the match semantics to
+// leftmost-longest.
+//
+// That is, when matching against text, the regexp returns a match that
+// begins as early as possible in the input (leftmost), and among those
+// it chooses a match that is as long as possible.
+// This so-called leftmost-longest matching is the same semantics
+// that early regular expression implementations used and that POSIX
+// specifies.
+//
+// However, there can be multiple leftmost-longest matches, with different
+// submatch choices, and here this package diverges from POSIX.
+// Among the possible leftmost-longest matches, this package chooses
+// the one that a backtracking search would have found first, while POSIX
+// specifies that the match be chosen to maximize the length of the first
+// subexpression, then the second, and so on from left to right.
+// The POSIX rule is computationally prohibitive and not even well-defined.
+// See https://swtch.com/~rsc/regexp/regexp2.html#posix for details.
+func CompilePOSIX(expr string) (*Regexp, error) {
+	return compile(expr, syntax.POSIX, true)
+}
+
+// Longest makes future searches prefer the leftmost-longest match.
+// That is, when matching against text, the regexp returns a match that
+// begins as early as possible in the input (leftmost), and among those
+// it chooses a match that is as long as possible.
+// This method modifies the [Regexp] and may not be called concurrently
+// with any other methods.
+func (re *Regexp) Longest() {
+	re.longest = true
+}
+
+func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) {
+	re, err := syntax.Parse(expr, mode)
+	if err != nil {
+		return nil, err
+	}
+	maxCap := re.MaxCap()
+	capNames := re.CapNames()
+
+	re = re.Simplify()
+	prog, err := syntax.Compile(re)
+	if err != nil {
+		return nil, err
+	}
+	matchcap := prog.NumCap
+	if matchcap < 2 {
+		matchcap = 2
+	}
+	regexp := &Regexp{
+		expr:        expr,
+		prog:        prog,
+		onepass:     compileOnePass(prog),
+		numSubexp:   maxCap,
+		subexpNames: capNames,
+		cond:        prog.StartCond(),
+		longest:     longest,
+		matchcap:    matchcap,
+		minInputLen: minInputLen(re),
+	}
+	if regexp.onepass == nil {
+		regexp.prefix, regexp.prefixComplete = prog.Prefix()
+		regexp.maxBitStateLen = maxBitStateLen(prog)
+	} else {
+		regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog)
+	}
+	if regexp.prefix != "" {
+		// TODO(rsc): Remove this allocation by adding
+		// IndexString to package bytes.
+		regexp.prefixBytes = []byte(regexp.prefix)
+		regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix)
+	}
+
+	n := len(prog.Inst)
+	i := 0
+	for matchSize[i] != 0 && matchSize[i] < n {
+		i++
+	}
+	regexp.mpool = i
+
+	return regexp, nil
+}
+
+// Pools of *machine for use during (*Regexp).doExecute,
+// split up by the size of the execution queues.
+// matchPool[i] machines have queue size matchSize[i].
+// On a 64-bit system each queue entry is 16 bytes,
+// so matchPool[0] has 16*2*128 = 4kB queues, etc.
+// The final matchPool is a catch-all for very large queues.
+var (
+	matchSize = [...]int{128, 512, 2048, 16384, 0}
+	matchPool [len(matchSize)]sync.Pool
+)
+
+// get returns a machine to use for matching re.
+// It uses the re's machine cache if possible, to avoid
+// unnecessary allocation.
+func (re *Regexp) get() *machine {
+	m, ok := matchPool[re.mpool].Get().(*machine)
+	if !ok {
+		m = new(machine)
+	}
+	m.re = re
+	m.p = re.prog
+	if cap(m.matchcap) < re.matchcap {
+		m.matchcap = make([]int, re.matchcap)
+		for _, t := range m.pool {
+			t.cap = make([]int, re.matchcap)
+		}
+	}
+
+	// Allocate queues if needed.
+	// Or reallocate, for "large" match pool.
+	n := matchSize[re.mpool]
+	if n == 0 { // large pool
+		n = len(re.prog.Inst)
+	}
+	if len(m.q0.sparse) < n {
+		m.q0 = queue{make([]uint32, n), make([]entry, 0, n)}
+		m.q1 = queue{make([]uint32, n), make([]entry, 0, n)}
+	}
+	return m
+}
+
+// put returns a machine to the correct machine pool.
+func (re *Regexp) put(m *machine) {
+	m.re = nil
+	m.p = nil
+	m.inputs.clear()
+	matchPool[re.mpool].Put(m)
+}
+
+// minInputLen walks the regexp to find the minimum length of any matchable input.
+func minInputLen(re *syntax.Regexp) int {
+	switch re.Op {
+	default:
+		return 0
+	case syntax.OpAnyChar, syntax.OpAnyCharNotNL, syntax.OpCharClass:
+		return 1
+	case syntax.OpLiteral:
+		l := 0
+		for _, r := range re.Rune {
+			if r == utf8.RuneError {
+				l++
+			} else {
+				l += utf8.RuneLen(r)
+			}
+		}
+		return l
+	case syntax.OpCapture, syntax.OpPlus:
+		return minInputLen(re.Sub[0])
+	case syntax.OpRepeat:
+		return re.Min * minInputLen(re.Sub[0])
+	case syntax.OpConcat:
+		l := 0
+		for _, sub := range re.Sub {
+			l += minInputLen(sub)
+		}
+		return l
+	case syntax.OpAlternate:
+		l := minInputLen(re.Sub[0])
+		var lnext int
+		for _, sub := range re.Sub[1:] {
+			lnext = minInputLen(sub)
+			if lnext < l {
+				l = lnext
+			}
+		}
+		return l
+	}
+}
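+
+// For example (an editor's note): for `(a|bc)d{2}`, minInputLen returns
+// 1 + 2 = 3: the cheaper alternative `a` contributes 1 byte and `d{2}`
+// contributes 2*1 bytes.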
+
+// MustCompile is like [Compile] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompile(str string) *Regexp {
+	regexp, err := Compile(str)
+	if err != nil {
+		panic(`regexp: Compile(` + quote(str) + `): ` + err.Error())
+	}
+	return regexp
+}
+
+// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompilePOSIX(str string) *Regexp {
+	regexp, err := CompilePOSIX(str)
+	if err != nil {
+		panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error())
+	}
+	return regexp
+}
+
+func quote(s string) string {
+	if strconv.CanBackquote(s) {
+		return "`" + s + "`"
+	}
+	return strconv.Quote(s)
+}
+
+// NumSubexp returns the number of parenthesized subexpressions in this [Regexp].
+func (re *Regexp) NumSubexp() int {
+	return re.numSubexp
+}
+
+// SubexpNames returns the names of the parenthesized subexpressions
+// in this [Regexp]. The name for the first sub-expression is names[1],
+// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
+// Since the Regexp as a whole cannot be named, names[0] is always
+// the empty string. The slice should not be modified.
+func (re *Regexp) SubexpNames() []string {
+	return re.subexpNames
+}
+
+// SubexpIndex returns the index of the first subexpression with the given name,
+// or -1 if there is no subexpression with that name.
+//
+// Note that multiple subexpressions can be written using the same name, as in
+// (?P<bob>a+)(?P<bob>b+), which declares two subexpressions named "bob".
+// In this case, SubexpIndex returns the index of the leftmost such subexpression
+// in the regular expression.
+func (re *Regexp) SubexpIndex(name string) int {
+	if name != "" {
+		for i, s := range re.subexpNames {
+			if name == s {
+				return i
+			}
+		}
+	}
+	return -1
+}
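+
+// Example (an editor's sketch):
+//
+//	re := MustCompile(`(?P<first>[a-z]+) (?P<last>[a-z]+)`)
+//	re.SubexpIndex("last")  // 2
+//	re.SubexpIndex("other") // -1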
+
+const endOfText rune = -1
+
+// input abstracts different representations of the input text. It provides
+// one-character lookahead.
+type input interface {
+	step(pos int) (r rune, width int) // advance one rune
+	canCheckPrefix() bool             // can we look ahead without losing info?
+	hasPrefix(re *Regexp) bool
+	index(re *Regexp, pos int) int
+	context(pos int) lazyFlag
+}
+
+// inputString scans a string.
+type inputString struct {
+	str string
+}
+
+func (i *inputString) step(pos int) (rune, int) {
+	if pos < len(i.str) {
+		c := i.str[pos]
+		if c < utf8.RuneSelf {
+			return rune(c), 1
+		}
+		return utf8.DecodeRuneInString(i.str[pos:])
+	}
+	return endOfText, 0
+}
+
+func (i *inputString) canCheckPrefix() bool {
+	return true
+}
+
+func (i *inputString) hasPrefix(re *Regexp) bool {
+	return strings.HasPrefix(i.str, re.prefix)
+}
+
+func (i *inputString) index(re *Regexp, pos int) int {
+	return strings.Index(i.str[pos:], re.prefix)
+}
+
+func (i *inputString) context(pos int) lazyFlag {
+	r1, r2 := endOfText, endOfText
+	// 0 < pos && pos <= len(i.str)
+	if uint(pos-1) < uint(len(i.str)) {
+		r1 = rune(i.str[pos-1])
+		if r1 >= utf8.RuneSelf {
+			r1, _ = utf8.DecodeLastRuneInString(i.str[:pos])
+		}
+	}
+	// 0 <= pos && pos < len(i.str)
+	if uint(pos) < uint(len(i.str)) {
+		r2 = rune(i.str[pos])
+		if r2 >= utf8.RuneSelf {
+			r2, _ = utf8.DecodeRuneInString(i.str[pos:])
+		}
+	}
+	return newLazyFlag(r1, r2)
+}
+
+// inputBytes scans a byte slice.
+type inputBytes struct {
+	str []byte
+}
+
+func (i *inputBytes) step(pos int) (rune, int) {
+	if pos < len(i.str) {
+		c := i.str[pos]
+		if c < utf8.RuneSelf {
+			return rune(c), 1
+		}
+		return utf8.DecodeRune(i.str[pos:])
+	}
+	return endOfText, 0
+}
+
+func (i *inputBytes) canCheckPrefix() bool {
+	return true
+}
+
+func (i *inputBytes) hasPrefix(re *Regexp) bool {
+	return bytes.HasPrefix(i.str, re.prefixBytes)
+}
+
+func (i *inputBytes) index(re *Regexp, pos int) int {
+	return bytes.Index(i.str[pos:], re.prefixBytes)
+}
+
+func (i *inputBytes) context(pos int) lazyFlag {
+	r1, r2 := endOfText, endOfText
+	// 0 < pos && pos <= len(i.str)
+	if uint(pos-1) < uint(len(i.str)) {
+		r1 = rune(i.str[pos-1])
+		if r1 >= utf8.RuneSelf {
+			r1, _ = utf8.DecodeLastRune(i.str[:pos])
+		}
+	}
+	// 0 <= pos && pos < len(i.str)
+	if uint(pos) < uint(len(i.str)) {
+		r2 = rune(i.str[pos])
+		if r2 >= utf8.RuneSelf {
+			r2, _ = utf8.DecodeRune(i.str[pos:])
+		}
+	}
+	return newLazyFlag(r1, r2)
+}
+
+// inputReader scans a RuneReader.
+type inputReader struct {
+	r     io.RuneReader
+	atEOT bool
+	pos   int
+}
+
+func (i *inputReader) step(pos int) (rune, int) {
+	if !i.atEOT && pos != i.pos {
+		return endOfText, 0
+	}
+	r, w, err := i.r.ReadRune()
+	if err != nil {
+		i.atEOT = true
+		return endOfText, 0
+	}
+	i.pos += w
+	return r, w
+}
+
+func (i *inputReader) canCheckPrefix() bool {
+	return false
+}
+
+func (i *inputReader) hasPrefix(re *Regexp) bool {
+	return false
+}
+
+func (i *inputReader) index(re *Regexp, pos int) int {
+	return -1
+}
+
+func (i *inputReader) context(pos int) lazyFlag {
+	return 0 // not used
+}
+
+// LiteralPrefix returns a literal string that must begin any match
+// of the regular expression re. It returns the boolean true if the
+// literal string comprises the entire regular expression.
+func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
+	return re.prefix, re.prefixComplete
+}
+
+// MatchReader reports whether the text returned by the [io.RuneReader]
+// contains any match of the regular expression re.
+func (re *Regexp) MatchReader(r io.RuneReader) bool {
+	return re.doMatch(r, nil, "")
+}
+
+// MatchString reports whether the string s
+// contains any match of the regular expression re.
+func (re *Regexp) MatchString(s string) bool {
+	return re.doMatch(nil, nil, s)
+}
+
+// Match reports whether the byte slice b
+// contains any match of the regular expression re.
+func (re *Regexp) Match(b []byte) bool {
+	return re.doMatch(nil, b, "")
+}
+
+// MatchReader reports whether the text returned by the RuneReader
+// contains any match of the regular expression pattern.
+// More complicated queries need to use [Compile] and the full [Regexp] interface.
+func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) {
+	re, err := Compile(pattern)
+	if err != nil {
+		return false, err
+	}
+	return re.MatchReader(r), nil
+}
+
+// MatchString reports whether the string s
+// contains any match of the regular expression pattern.
+// More complicated queries need to use [Compile] and the full [Regexp] interface.
+func MatchString(pattern string, s string) (matched bool, err error) {
+	re, err := Compile(pattern)
+	if err != nil {
+		return false, err
+	}
+	return re.MatchString(s), nil
+}
+
+// Match reports whether the byte slice b
+// contains any match of the regular expression pattern.
+// More complicated queries need to use [Compile] and the full [Regexp] interface.
+func Match(pattern string, b []byte) (matched bool, err error) {
+	re, err := Compile(pattern)
+	if err != nil {
+		return false, err
+	}
+	return re.Match(b), nil
+}
+
+// ReplaceAllString returns a copy of src, replacing matches of the [Regexp]
+// with the replacement string repl.
+// Inside repl, $ signs are interpreted as in [Regexp.Expand].
+func (re *Regexp) ReplaceAllString(src, repl string) string {
+	n := 2
+	if strings.Contains(repl, "$") {
+		n = 2 * (re.numSubexp + 1)
+	}
+	b := re.replaceAll(nil, src, n, func(dst []byte, match []int) []byte {
+		return re.expand(dst, repl, nil, src, match)
+	})
+	return string(b)
+}
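+
+// Example (an editor's sketch):
+//
+//	re := MustCompile(`(\w+):(\w+)`)
+//	re.ReplaceAllString("key:value", "$2=$1") // "value=key"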
+
+// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp]
+// with the replacement string repl. The replacement repl is substituted directly,
+// without using [Regexp.Expand].
+func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
+	return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
+		return append(dst, repl...)
+	}))
+}
+
+// ReplaceAllStringFunc returns a copy of src in which all matches of the
+// [Regexp] have been replaced by the return value of function repl applied
+// to the matched substring. The replacement returned by repl is substituted
+// directly, without using [Regexp.Expand].
+func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
+	b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
+		return append(dst, repl(src[match[0]:match[1]])...)
+	})
+	return string(b)
+}
+
+func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte {
+	lastMatchEnd := 0 // end position of the most recent match
+	searchPos := 0    // position where we next look for a match
+	var buf []byte
+	var endPos int
+	if bsrc != nil {
+		endPos = len(bsrc)
+	} else {
+		endPos = len(src)
+	}
+	if nmatch > re.prog.NumCap {
+		nmatch = re.prog.NumCap
+	}
+
+	var dstCap [2]int
+	for searchPos <= endPos {
+		a := re.doExecute(nil, bsrc, src, searchPos, nmatch, dstCap[:0])
+		if len(a) == 0 {
+			break // no more matches
+		}
+
+		// Copy the unmatched characters before this match.
+		if bsrc != nil {
+			buf = append(buf, bsrc[lastMatchEnd:a[0]]...)
+		} else {
+			buf = append(buf, src[lastMatchEnd:a[0]]...)
+		}
+
+		// Now insert a copy of the replacement string, but not for a
+		// match of the empty string immediately after another match.
+		// (Otherwise, we get double replacement for patterns that
+		// match both empty and nonempty strings.)
+		if a[1] > lastMatchEnd || a[0] == 0 {
+			buf = repl(buf, a)
+		}
+		lastMatchEnd = a[1]
+
+		// Advance past this match; always advance at least one character.
+		var width int
+		if bsrc != nil {
+			_, width = utf8.DecodeRune(bsrc[searchPos:])
+		} else {
+			_, width = utf8.DecodeRuneInString(src[searchPos:])
+		}
+		if searchPos+width > a[1] {
+			searchPos += width
+		} else if searchPos+1 > a[1] {
+			// This clause is only needed at the end of the input
+			// string. In that case, DecodeRuneInString returns width=0.
+			searchPos++
+		} else {
+			searchPos = a[1]
+		}
+	}
+
+	// Copy the unmatched characters after the last match.
+	if bsrc != nil {
+		buf = append(buf, bsrc[lastMatchEnd:]...)
+	} else {
+		buf = append(buf, src[lastMatchEnd:]...)
+	}
+
+	return buf
+}
+
+// ReplaceAll returns a copy of src, replacing matches of the [Regexp]
+// with the replacement text repl.
+// Inside repl, $ signs are interpreted as in [Regexp.Expand].
+func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
+	n := 2
+	if bytes.IndexByte(repl, '$') >= 0 {
+		n = 2 * (re.numSubexp + 1)
+	}
+	srepl := ""
+	b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte {
+		if len(srepl) != len(repl) {
+			srepl = string(repl)
+		}
+		return re.expand(dst, srepl, src, "", match)
+	})
+	return b
+}
+
+// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp]
+// with the replacement bytes repl. The replacement repl is substituted directly,
+// without using [Regexp.Expand].
+func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
+	return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
+		return append(dst, repl...)
+	})
+}
+
+// ReplaceAllFunc returns a copy of src in which all matches of the
+// [Regexp] have been replaced by the return value of function repl applied
+// to the matched byte slice. The replacement returned by repl is substituted
+// directly, without using [Regexp.Expand].
+func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
+	return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
+		return append(dst, repl(src[match[0]:match[1]])...)
+	})
+}
+
+// Bitmap used by func special to check whether a character needs to be escaped.
+var specialBytes [16]byte
+
+// special reports whether byte b needs to be escaped by QuoteMeta.
+func special(b byte) bool {
+	return b < utf8.RuneSelf && specialBytes[b%16]&(1<<(b/16)) != 0
+}
+
+func init() {
+	for _, b := range []byte(`\.+*?()|[]{}^$`) {
+		specialBytes[b%16] |= 1 << (b / 16)
+	}
+}
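+
+// For example (an editor's note): '$' is byte 36, so its flag lives at
+// bit 36/16 == 2 of specialBytes[36%16] == specialBytes[4].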
+
+// QuoteMeta returns a string that escapes all regular expression metacharacters
+// inside the argument text; the returned string is a regular expression matching
+// the literal text.
+func QuoteMeta(s string) string {
+	// A byte loop is correct because all metacharacters are ASCII.
+	var i int
+	for i = 0; i < len(s); i++ {
+		if special(s[i]) {
+			break
+		}
+	}
+	// No meta characters found, so return original string.
+	if i >= len(s) {
+		return s
+	}
+
+	b := make([]byte, 2*len(s)-i)
+	copy(b, s[:i])
+	j := i
+	for ; i < len(s); i++ {
+		if special(s[i]) {
+			b[j] = '\\'
+			j++
+		}
+		b[j] = s[i]
+		j++
+	}
+	return string(b[:j])
+}
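+
+// Example (an editor's sketch):
+//
+//	QuoteMeta("1+1=2?") // `1\+1=2\?`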
+
+// The number of capture values in the program may correspond
+// to fewer capturing expressions than are in the regexp.
+// For example, "(a){0}" turns into an empty program, so the
+// maximum capture in the program is 0 but we need to return
+// an expression for \1.  Pad appends -1s to the slice a as needed.
+func (re *Regexp) pad(a []int) []int {
+	if a == nil {
+		// No match.
+		return nil
+	}
+	n := (1 + re.numSubexp) * 2
+	for len(a) < n {
+		a = append(a, -1)
+	}
+	return a
+}
+
+// allMatches calls deliver at most n times
+// with the location of successive matches in the input text.
+// The input text is b if non-nil, otherwise s.
+func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) {
+	var end int
+	if b == nil {
+		end = len(s)
+	} else {
+		end = len(b)
+	}
+
+	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
+		matches := re.doExecute(nil, b, s, pos, re.prog.NumCap, nil)
+		if len(matches) == 0 {
+			break
+		}
+
+		accept := true
+		if matches[1] == pos {
+			// We've found an empty match.
+			if matches[0] == prevMatchEnd {
+				// We don't allow an empty match right
+				// after a previous match, so ignore it.
+				accept = false
+			}
+			var width int
+			if b == nil {
+				is := inputString{str: s}
+				_, width = is.step(pos)
+			} else {
+				ib := inputBytes{str: b}
+				_, width = ib.step(pos)
+			}
+			if width > 0 {
+				pos += width
+			} else {
+				pos = end + 1
+			}
+		} else {
+			pos = matches[1]
+		}
+		prevMatchEnd = matches[1]
+
+		if accept {
+			deliver(re.pad(matches))
+			i++
+		}
+	}
+}
+
+// Find returns a slice holding the text of the leftmost match in b of the regular expression.
+// A return value of nil indicates no match.
+func (re *Regexp) Find(b []byte) []byte {
+	var dstCap [2]int
+	a := re.doExecute(nil, b, "", 0, 2, dstCap[:0])
+	if a == nil {
+		return nil
+	}
+	return b[a[0]:a[1]:a[1]]
+}
+
+// FindIndex returns a two-element slice of integers defining the location of
+// the leftmost match in b of the regular expression. The match itself is at
+// b[loc[0]:loc[1]].
+// A return value of nil indicates no match.
+func (re *Regexp) FindIndex(b []byte) (loc []int) {
+	a := re.doExecute(nil, b, "", 0, 2, nil)
+	if a == nil {
+		return nil
+	}
+	return a[0:2]
+}
+
+// FindString returns a string holding the text of the leftmost match in s of the regular
+// expression. If there is no match, the return value is an empty string,
+// but it will also be empty if the regular expression successfully matches
+// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is
+// necessary to distinguish these cases.
+func (re *Regexp) FindString(s string) string {
+	var dstCap [2]int
+	a := re.doExecute(nil, nil, s, 0, 2, dstCap[:0])
+	if a == nil {
+		return ""
+	}
+	return s[a[0]:a[1]]
+}
+
+// FindStringIndex returns a two-element slice of integers defining the
+// location of the leftmost match in s of the regular expression. The match
+// itself is at s[loc[0]:loc[1]].
+// A return value of nil indicates no match.
+func (re *Regexp) FindStringIndex(s string) (loc []int) {
+	a := re.doExecute(nil, nil, s, 0, 2, nil)
+	if a == nil {
+		return nil
+	}
+	return a[0:2]
+}
+
+// FindReaderIndex returns a two-element slice of integers defining the
+// location of the leftmost match of the regular expression in text read from
+// the [io.RuneReader]. The match text was found in the input stream at
+// byte offset loc[0] through loc[1]-1.
+// A return value of nil indicates no match.
+func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
+	a := re.doExecute(r, nil, "", 0, 2, nil)
+	if a == nil {
+		return nil
+	}
+	return a[0:2]
+}
+
+// FindSubmatch returns a slice of slices holding the text of the leftmost
+// match of the regular expression in b and the matches, if any, of its
+// subexpressions, as defined by the 'Submatch' descriptions in the package
+// comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindSubmatch(b []byte) [][]byte {
+	var dstCap [4]int
+	a := re.doExecute(nil, b, "", 0, re.prog.NumCap, dstCap[:0])
+	if a == nil {
+		return nil
+	}
+	ret := make([][]byte, 1+re.numSubexp)
+	for i := range ret {
+		if 2*i < len(a) && a[2*i] >= 0 {
+			ret[i] = b[a[2*i]:a[2*i+1]:a[2*i+1]]
+		}
+	}
+	return ret
+}
+
+// Expand appends template to dst and returns the result; during the
+// append, Expand replaces variables in the template with corresponding
+// matches drawn from src. The match slice should have been returned by
+// [Regexp.FindSubmatchIndex].
+//
+// In the template, a variable is denoted by a substring of the form
+// $name or ${name}, where name is a non-empty sequence of letters,
+// digits, and underscores. A purely numeric name like $1 refers to
+// the submatch with the corresponding index; other names refer to
+// capturing parentheses named with the (?P<name>...) syntax. A
+// reference to an out of range or unmatched index or a name that is not
+// present in the regular expression is replaced with an empty slice.
+//
+// In the $name form, name is taken to be as long as possible: $1x is
+// equivalent to ${1x}, not ${1}x, and $10 is equivalent to ${10}, not ${1}0.
+//
+// To insert a literal $ in the output, use $$ in the template.
+func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
+	return re.expand(dst, string(template), src, "", match)
+}
+
+// ExpandString is like [Regexp.Expand] but the template and source are strings.
+// It appends to and returns a byte slice in order to give the calling
+// code control over allocation.
+func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
+	return re.expand(dst, template, nil, src, match)
+}
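+
+// Example (an editor's sketch):
+//
+//	re := MustCompile(`(?P<key>\w+):(?P<value>\w+)`)
+//	m := re.FindStringSubmatchIndex("host:example")
+//	re.ExpandString(nil, "$value at $key", "host:example", m)
+//	// []byte("example at host")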
+
+func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte {
+	for len(template) > 0 {
+		before, after, ok := strings.Cut(template, "$")
+		if !ok {
+			break
+		}
+		dst = append(dst, before...)
+		template = after
+		if template != "" && template[0] == '$' {
+			// Treat $$ as $.
+			dst = append(dst, '$')
+			template = template[1:]
+			continue
+		}
+		name, num, rest, ok := extract(template)
+		if !ok {
+			// Malformed; treat $ as raw text.
+			dst = append(dst, '$')
+			continue
+		}
+		template = rest
+		if num >= 0 {
+			if 2*num+1 < len(match) && match[2*num] >= 0 {
+				if bsrc != nil {
+					dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...)
+				} else {
+					dst = append(dst, src[match[2*num]:match[2*num+1]]...)
+				}
+			}
+		} else {
+			for i, namei := range re.subexpNames {
+				if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 {
+					if bsrc != nil {
+						dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...)
+					} else {
+						dst = append(dst, src[match[2*i]:match[2*i+1]]...)
+					}
+					break
+				}
+			}
+		}
+	}
+	dst = append(dst, template...)
+	return dst
+}
+
+// extract returns the name from a leading "name" or "{name}" in str.
+// (The $ has already been removed by the caller.)
+// If it is a number, extract returns num set to that number; otherwise num = -1.
+func extract(str string) (name string, num int, rest string, ok bool) {
+	if str == "" {
+		return
+	}
+	brace := false
+	if str[0] == '{' {
+		brace = true
+		str = str[1:]
+	}
+	i := 0
+	for i < len(str) {
+		rune, size := utf8.DecodeRuneInString(str[i:])
+		if !unicode.IsLetter(rune) && !unicode.IsDigit(rune) && rune != '_' {
+			break
+		}
+		i += size
+	}
+	if i == 0 {
+		// empty name is not okay
+		return
+	}
+	name = str[:i]
+	if brace {
+		if i >= len(str) || str[i] != '}' {
+			// missing closing brace
+			return
+		}
+		i++
+	}
+
+	// Parse number.
+	num = 0
+	for i := 0; i < len(name); i++ {
+		if name[i] < '0' || '9' < name[i] || num >= 1e8 {
+			num = -1
+			break
+		}
+		num = num*10 + int(name[i]) - '0'
+	}
+	// Disallow leading zeros.
+	if name[0] == '0' && len(name) > 1 {
+		num = -1
+	}
+
+	rest = str[i:]
+	ok = true
+	return
+}
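+
+// For example (an editor's note): extract("{name}!") yields name "name",
+// num -1, rest "!"; extract("10,") yields name "10", num 10, rest ",".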
+
+// FindSubmatchIndex returns a slice holding the index pairs identifying the
+// leftmost match of the regular expression in b and the matches, if any, of
+// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions
+// in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindSubmatchIndex(b []byte) []int {
+	return re.pad(re.doExecute(nil, b, "", 0, re.prog.NumCap, nil))
+}
+
+// FindStringSubmatch returns a slice of strings holding the text of the
+// leftmost match of the regular expression in s and the matches, if any, of
+// its subexpressions, as defined by the 'Submatch' description in the
+// package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindStringSubmatch(s string) []string {
+	var dstCap [4]int
+	a := re.doExecute(nil, nil, s, 0, re.prog.NumCap, dstCap[:0])
+	if a == nil {
+		return nil
+	}
+	ret := make([]string, 1+re.numSubexp)
+	for i := range ret {
+		if 2*i < len(a) && a[2*i] >= 0 {
+			ret[i] = s[a[2*i]:a[2*i+1]]
+		}
+	}
+	return ret
+}
+
+// FindStringSubmatchIndex returns a slice holding the index pairs
+// identifying the leftmost match of the regular expression in s and the
+// matches, if any, of its subexpressions, as defined by the 'Submatch' and
+// 'Index' descriptions in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindStringSubmatchIndex(s string) []int {
+	return re.pad(re.doExecute(nil, nil, s, 0, re.prog.NumCap, nil))
+}
+
+// FindReaderSubmatchIndex returns a slice holding the index pairs
+// identifying the leftmost match of the regular expression of text read by
+// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined
+// by the 'Submatch' and 'Index' descriptions in the package comment. A
+// return value of nil indicates no match.
+func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
+	return re.pad(re.doExecute(r, nil, "", 0, re.prog.NumCap, nil))
+}
+
+const startSize = 10 // The size at which to start a slice in the 'All' routines.
+
+// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive
+// matches of the expression, as defined by the 'All' description in the
+// package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAll(b []byte, n int) [][]byte {
+	if n < 0 {
+		n = len(b) + 1
+	}
+	var result [][]byte
+	re.allMatches("", b, n, func(match []int) {
+		if result == nil {
+			result = make([][]byte, 0, startSize)
+		}
+		result = append(result, b[match[0]:match[1]:match[1]])
+	})
+	return result
+}
+
+// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all
+// successive matches of the expression, as defined by the 'All' description
+// in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
+	if n < 0 {
+		n = len(b) + 1
+	}
+	var result [][]int
+	re.allMatches("", b, n, func(match []int) {
+		if result == nil {
+			result = make([][]int, 0, startSize)
+		}
+		result = append(result, match[0:2])
+	})
+	return result
+}
+
+// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all
+// successive matches of the expression, as defined by the 'All' description
+// in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAllString(s string, n int) []string {
+	if n < 0 {
+		n = len(s) + 1
+	}
+	var result []string
+	re.allMatches(s, nil, n, func(match []int) {
+		if result == nil {
+			result = make([]string, 0, startSize)
+		}
+		result = append(result, s[match[0]:match[1]])
+	})
+	return result
+}
+
+// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a
+// slice of all successive matches of the expression, as defined by the 'All'
+// description in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
+	if n < 0 {
+		n = len(s) + 1
+	}
+	var result [][]int
+	re.allMatches(s, nil, n, func(match []int) {
+		if result == nil {
+			result = make([][]int, 0, startSize)
+		}
+		result = append(result, match[0:2])
+	})
+	return result
+}
+
+// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice
+// of all successive matches of the expression, as defined by the 'All'
+// description in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
+	if n < 0 {
+		n = len(b) + 1
+	}
+	var result [][][]byte
+	re.allMatches("", b, n, func(match []int) {
+		if result == nil {
+			result = make([][][]byte, 0, startSize)
+		}
+		slice := make([][]byte, len(match)/2)
+		for j := range slice {
+			if match[2*j] >= 0 {
+				slice[j] = b[match[2*j]:match[2*j+1]:match[2*j+1]]
+			}
+		}
+		result = append(result, slice)
+	})
+	return result
+}
+
+// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns
+// a slice of all successive matches of the expression, as defined by the
+// 'All' description in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
+	if n < 0 {
+		n = len(b) + 1
+	}
+	var result [][]int
+	re.allMatches("", b, n, func(match []int) {
+		if result == nil {
+			result = make([][]int, 0, startSize)
+		}
+		result = append(result, match)
+	})
+	return result
+}
+
+// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it
+// returns a slice of all successive matches of the expression, as defined by
+// the 'All' description in the package comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
+	if n < 0 {
+		n = len(s) + 1
+	}
+	var result [][]string
+	re.allMatches(s, nil, n, func(match []int) {
+		if result == nil {
+			result = make([][]string, 0, startSize)
+		}
+		slice := make([]string, len(match)/2)
+		for j := range slice {
+			if match[2*j] >= 0 {
+				slice[j] = s[match[2*j]:match[2*j+1]]
+			}
+		}
+		result = append(result, slice)
+	})
+	return result
+}
+
+// FindAllStringSubmatchIndex is the 'All' version of
+// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of
+// the expression, as defined by the 'All' description in the package
+// comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
+	if n < 0 {
+		n = len(s) + 1
+	}
+	var result [][]int
+	re.allMatches(s, nil, n, func(match []int) {
+		if result == nil {
+			result = make([][]int, 0, startSize)
+		}
+		result = append(result, match)
+	})
+	return result
+}
+
+// Split slices s into substrings separated by the expression and returns a slice of
+// the substrings between those expression matches.
+//
+// The slice returned by this method consists of all the substrings of s
+// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression
+// that contains no metacharacters, it is equivalent to [strings.SplitN].
+//
+// Example:
+//
+//	s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5)
+//	// s: ["", "b", "b", "c", "cadaaae"]
+//
+// The count determines the number of substrings to return:
+//
+//	n > 0: at most n substrings; the last substring will be the unsplit remainder
+//	n == 0: the result is nil (zero substrings)
+//	n < 0: all substrings
+func (re *Regexp) Split(s string, n int) []string {
+	if n == 0 {
+		return nil
+	}
+
+	if len(re.expr) > 0 && len(s) == 0 {
+		return []string{""}
+	}
+
+	matches := re.FindAllStringIndex(s, n)
+	strings := make([]string, 0, len(matches))
+
+	beg := 0
+	end := 0
+	for _, match := range matches {
+		if n > 0 && len(strings) >= n-1 {
+			break
+		}
+
+		end = match[0]
+		if match[1] != 0 {
+			strings = append(strings, s[beg:end])
+		}
+		beg = match[1]
+	}
+
+	if end != len(s) {
+		strings = append(strings, s[beg:])
+	}
+
+	return strings
+}
+
+// MarshalText implements [encoding.TextMarshaler]. The output
+// matches that of calling the [Regexp.String] method.
+//
+// Note that the output is lossy in some cases: This method does not indicate
+// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or
+// those for which the [Regexp.Longest] method has been called.
+func (re *Regexp) MarshalText() ([]byte, error) {
+	return []byte(re.String()), nil
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler] by calling
+// [Compile] on the encoded value.
+func (re *Regexp) UnmarshalText(text []byte) error {
+	newRE, err := Compile(string(text))
+	if err != nil {
+		return err
+	}
+	*re = *newRE
+	return nil
+}
diff --git a/vendor/github.com/grafana/regexp/syntax/compile.go b/vendor/github.com/grafana/regexp/syntax/compile.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9f9fa024bf10d0981d5f61f309b1251c64cf756
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/compile.go
@@ -0,0 +1,296 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "unicode"
+
+// A patchList is a list of instruction pointers that need to be filled in (patched).
+// Because the pointers haven't been filled in yet, we can reuse their storage
+// to hold the list. It's kind of sleazy, but works well in practice.
+// See https://swtch.com/~rsc/regexp/regexp1.html for inspiration.
+//
+// These aren't really pointers: they're integers, so we can reinterpret them
+// this way without using package unsafe. A value l.head denotes
+// p.Inst[l.head>>1].Out (l.head&1==0) or .Arg (l.head&1==1).
+// head == 0 denotes the empty list, okay because we start every program
+// with a fail instruction, so we'll never want to point at its output link.
+type patchList struct {
+	head, tail uint32
+}
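+
+// For example (an editor's note): head == 5 denotes p.Inst[2].Arg
+// (5>>1 == 2, 5&1 == 1), and head == 4 denotes p.Inst[2].Out.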
+
+func makePatchList(n uint32) patchList {
+	return patchList{n, n}
+}
+
+func (l patchList) patch(p *Prog, val uint32) {
+	head := l.head
+	for head != 0 {
+		i := &p.Inst[head>>1]
+		if head&1 == 0 {
+			head = i.Out
+			i.Out = val
+		} else {
+			head = i.Arg
+			i.Arg = val
+		}
+	}
+}
+
+func (l1 patchList) append(p *Prog, l2 patchList) patchList {
+	if l1.head == 0 {
+		return l2
+	}
+	if l2.head == 0 {
+		return l1
+	}
+
+	i := &p.Inst[l1.tail>>1]
+	if l1.tail&1 == 0 {
+		i.Out = l2.head
+	} else {
+		i.Arg = l2.head
+	}
+	return patchList{l1.head, l2.tail}
+}
+
+// A frag represents a compiled program fragment.
+type frag struct {
+	i        uint32    // index of first instruction
+	out      patchList // where to record end instruction
+	nullable bool      // whether fragment can match empty string
+}
+
+type compiler struct {
+	p *Prog
+}
+
+// Compile compiles the regexp into a program to be executed.
+// The regexp should have been simplified already (returned from re.Simplify).
+func Compile(re *Regexp) (*Prog, error) {
+	var c compiler
+	c.init()
+	f := c.compile(re)
+	f.out.patch(c.p, c.inst(InstMatch).i)
+	c.p.Start = int(f.i)
+	return c.p, nil
+}
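+
+// Typical use from outside this package (an editor's sketch):
+//
+//	re, err := syntax.Parse(`a+b`, syntax.Perl) // err is nil for a valid pattern
+//	prog, _ := syntax.Compile(re.Simplify())
+//	_ = prog.Start // index of the entry instruction in prog.Inst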
+
+func (c *compiler) init() {
+	c.p = new(Prog)
+	c.p.NumCap = 2 // implicit ( and ) for whole match $0
+	c.inst(InstFail)
+}
+
+var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune}
+var anyRune = []rune{0, unicode.MaxRune}
+
+func (c *compiler) compile(re *Regexp) frag {
+	switch re.Op {
+	case OpNoMatch:
+		return c.fail()
+	case OpEmptyMatch:
+		return c.nop()
+	case OpLiteral:
+		if len(re.Rune) == 0 {
+			return c.nop()
+		}
+		var f frag
+		for j := range re.Rune {
+			f1 := c.rune(re.Rune[j:j+1], re.Flags)
+			if j == 0 {
+				f = f1
+			} else {
+				f = c.cat(f, f1)
+			}
+		}
+		return f
+	case OpCharClass:
+		return c.rune(re.Rune, re.Flags)
+	case OpAnyCharNotNL:
+		return c.rune(anyRuneNotNL, 0)
+	case OpAnyChar:
+		return c.rune(anyRune, 0)
+	case OpBeginLine:
+		return c.empty(EmptyBeginLine)
+	case OpEndLine:
+		return c.empty(EmptyEndLine)
+	case OpBeginText:
+		return c.empty(EmptyBeginText)
+	case OpEndText:
+		return c.empty(EmptyEndText)
+	case OpWordBoundary:
+		return c.empty(EmptyWordBoundary)
+	case OpNoWordBoundary:
+		return c.empty(EmptyNoWordBoundary)
+	case OpCapture:
+		bra := c.cap(uint32(re.Cap << 1))
+		sub := c.compile(re.Sub[0])
+		ket := c.cap(uint32(re.Cap<<1 | 1))
+		return c.cat(c.cat(bra, sub), ket)
+	case OpStar:
+		return c.star(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0)
+	case OpPlus:
+		return c.plus(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0)
+	case OpQuest:
+		return c.quest(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0)
+	case OpConcat:
+		if len(re.Sub) == 0 {
+			return c.nop()
+		}
+		var f frag
+		for i, sub := range re.Sub {
+			if i == 0 {
+				f = c.compile(sub)
+			} else {
+				f = c.cat(f, c.compile(sub))
+			}
+		}
+		return f
+	case OpAlternate:
+		var f frag
+		for _, sub := range re.Sub {
+			f = c.alt(f, c.compile(sub))
+		}
+		return f
+	}
+	panic("regexp: unhandled case in compile")
+}
+
+func (c *compiler) inst(op InstOp) frag {
+	// TODO: impose length limit
+	f := frag{i: uint32(len(c.p.Inst)), nullable: true}
+	c.p.Inst = append(c.p.Inst, Inst{Op: op})
+	return f
+}
+
+func (c *compiler) nop() frag {
+	f := c.inst(InstNop)
+	f.out = makePatchList(f.i << 1)
+	return f
+}
+
+func (c *compiler) fail() frag {
+	return frag{}
+}
+
+func (c *compiler) cap(arg uint32) frag {
+	f := c.inst(InstCapture)
+	f.out = makePatchList(f.i << 1)
+	c.p.Inst[f.i].Arg = arg
+
+	if c.p.NumCap < int(arg)+1 {
+		c.p.NumCap = int(arg) + 1
+	}
+	return f
+}
+
+func (c *compiler) cat(f1, f2 frag) frag {
+	// concat of failure is failure
+	if f1.i == 0 || f2.i == 0 {
+		return frag{}
+	}
+
+	// TODO: elide nop
+
+	f1.out.patch(c.p, f2.i)
+	return frag{f1.i, f2.out, f1.nullable && f2.nullable}
+}
+
+func (c *compiler) alt(f1, f2 frag) frag {
+	// alt of failure is other
+	if f1.i == 0 {
+		return f2
+	}
+	if f2.i == 0 {
+		return f1
+	}
+
+	f := c.inst(InstAlt)
+	i := &c.p.Inst[f.i]
+	i.Out = f1.i
+	i.Arg = f2.i
+	f.out = f1.out.append(c.p, f2.out)
+	f.nullable = f1.nullable || f2.nullable
+	return f
+}
+
+func (c *compiler) quest(f1 frag, nongreedy bool) frag {
+	f := c.inst(InstAlt)
+	i := &c.p.Inst[f.i]
+	if nongreedy {
+		i.Arg = f1.i
+		f.out = makePatchList(f.i << 1)
+	} else {
+		i.Out = f1.i
+		f.out = makePatchList(f.i<<1 | 1)
+	}
+	f.out = f.out.append(c.p, f1.out)
+	return f
+}
+
+// loop returns the fragment for the main loop of a plus or star.
+// For plus, it can be used after changing the entry to f1.i.
+// For star, it can be used directly when f1 can't match an empty string.
+// (When f1 can match an empty string, f1* must be implemented as (f1+)?
+// to get the priority match order correct.)
+func (c *compiler) loop(f1 frag, nongreedy bool) frag {
+	f := c.inst(InstAlt)
+	i := &c.p.Inst[f.i]
+	if nongreedy {
+		i.Arg = f1.i
+		f.out = makePatchList(f.i << 1)
+	} else {
+		i.Out = f1.i
+		f.out = makePatchList(f.i<<1 | 1)
+	}
+	f1.out.patch(c.p, f.i)
+	return f
+}
+
+func (c *compiler) star(f1 frag, nongreedy bool) frag {
+	if f1.nullable {
+		// Use (f1+)? to get priority match order correct.
+		// See golang.org/issue/46123.
+		return c.quest(c.plus(f1, nongreedy), nongreedy)
+	}
+	return c.loop(f1, nongreedy)
+}
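+
+// For example (an editor's note): `(a?)*` takes the quest(plus(...)) path
+// above, since its body can match the empty string.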
+
+func (c *compiler) plus(f1 frag, nongreedy bool) frag {
+	return frag{f1.i, c.loop(f1, nongreedy).out, f1.nullable}
+}
+
+func (c *compiler) empty(op EmptyOp) frag {
+	f := c.inst(InstEmptyWidth)
+	c.p.Inst[f.i].Arg = uint32(op)
+	f.out = makePatchList(f.i << 1)
+	return f
+}
+
+func (c *compiler) rune(r []rune, flags Flags) frag {
+	f := c.inst(InstRune)
+	f.nullable = false
+	i := &c.p.Inst[f.i]
+	i.Rune = r
+	flags &= FoldCase // only relevant flag is FoldCase
+	if len(r) != 1 || unicode.SimpleFold(r[0]) == r[0] {
+		// and sometimes not even that
+		flags &^= FoldCase
+	}
+	i.Arg = uint32(flags)
+	f.out = makePatchList(f.i << 1)
+
+	// Special cases for exec machine.
+	switch {
+	case flags&FoldCase == 0 && (len(r) == 1 || len(r) == 2 && r[0] == r[1]):
+		i.Op = InstRune1
+	case len(r) == 2 && r[0] == 0 && r[1] == unicode.MaxRune:
+		i.Op = InstRuneAny
+	case len(r) == 4 && r[0] == 0 && r[1] == '\n'-1 && r[2] == '\n'+1 && r[3] == unicode.MaxRune:
+		i.Op = InstRuneAnyNotNL
+	}
+
+	return f
+}
diff --git a/vendor/github.com/grafana/regexp/syntax/doc.go b/vendor/github.com/grafana/regexp/syntax/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..877f1043ddda8b4ead01122d7e9131ceb92fa98c
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/doc.go
@@ -0,0 +1,142 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by mksyntaxgo from the RE2 distribution. DO NOT EDIT.
+
+/*
+Package syntax parses regular expressions into parse trees and compiles
+parse trees into programs. Most clients of regular expressions will use the
+facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package.
+
+# Syntax
+
+The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows.
+Parts of the syntax can be disabled by passing alternate flags to [Parse].
+
+Single characters:
+
+	.              any character, possibly including newline (flag s=true)
+	[xyz]          character class
+	[^xyz]         negated character class
+	\d             Perl character class
+	\D             negated Perl character class
+	[[:alpha:]]    ASCII character class
+	[[:^alpha:]]   negated ASCII character class
+	\pN            Unicode character class (one-letter name)
+	\p{Greek}      Unicode character class
+	\PN            negated Unicode character class (one-letter name)
+	\P{Greek}      negated Unicode character class
+
+Composites:
+
+	xy             x followed by y
+	x|y            x or y (prefer x)
+
+Repetitions:
+
+	x*             zero or more x, prefer more
+	x+             one or more x, prefer more
+	x?             zero or one x, prefer one
+	x{n,m}         n or n+1 or ... or m x, prefer more
+	x{n,}          n or more x, prefer more
+	x{n}           exactly n x
+	x*?            zero or more x, prefer fewer
+	x+?            one or more x, prefer fewer
+	x??            zero or one x, prefer zero
+	x{n,m}?        n or n+1 or ... or m x, prefer fewer
+	x{n,}?         n or more x, prefer fewer
+	x{n}?          exactly n x
+
+Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n}
+reject forms that create a minimum or maximum repetition count above 1000.
+Unlimited repetitions are not subject to this restriction.
+
+Grouping:
+
+	(re)           numbered capturing group (submatch)
+	(?P<name>re)   named & numbered capturing group (submatch)
+	(?<name>re)    named & numbered capturing group (submatch)
+	(?:re)         non-capturing group
+	(?flags)       set flags within current group; non-capturing
+	(?flags:re)    set flags during re; non-capturing
+
+	Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are:
+
+	i              case-insensitive (default false)
+	m              multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false)
+	s              let . match \n (default false)
+	U              ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false)
+
+Empty strings:
+
+	^              at beginning of text or line (flag m=true)
+	$              at end of text (like \z not \Z) or line (flag m=true)
+	\A             at beginning of text
+	\b             at ASCII word boundary (\w on one side and \W, \A, or \z on the other)
+	\B             not at ASCII word boundary
+	\z             at end of text
+
+Escape sequences:
+
+	\a             bell (== \007)
+	\f             form feed (== \014)
+	\t             horizontal tab (== \011)
+	\n             newline (== \012)
+	\r             carriage return (== \015)
+	\v             vertical tab character (== \013)
+	\*             literal *, for any punctuation character *
+	\123           octal character code (up to three digits)
+	\x7F           hex character code (exactly two digits)
+	\x{10FFFF}     hex character code
+	\Q...\E        literal text ... even if ... has punctuation
+
+Character class elements:
+
+	x              single character
+	A-Z            character range (inclusive)
+	\d             Perl character class
+	[:foo:]        ASCII character class foo
+	\p{Foo}        Unicode character class Foo
+	\pF            Unicode character class F (one-letter name)
+
+Named character classes as character class elements:
+
+	[\d]           digits (== \d)
+	[^\d]          not digits (== \D)
+	[\D]           not digits (== \D)
+	[^\D]          not not digits (== \d)
+	[[:name:]]     named ASCII class inside character class (== [:name:])
+	[^[:name:]]    named ASCII class inside negated character class (== [:^name:])
+	[\p{Name}]     named Unicode property inside character class (== \p{Name})
+	[^\p{Name}]    named Unicode property inside negated character class (== \P{Name})
+
+Perl character classes (all ASCII-only):
+
+	\d             digits (== [0-9])
+	\D             not digits (== [^0-9])
+	\s             whitespace (== [\t\n\f\r ])
+	\S             not whitespace (== [^\t\n\f\r ])
+	\w             word characters (== [0-9A-Za-z_])
+	\W             not word characters (== [^0-9A-Za-z_])
+
+ASCII character classes:
+
+	[[:alnum:]]    alphanumeric (== [0-9A-Za-z])
+	[[:alpha:]]    alphabetic (== [A-Za-z])
+	[[:ascii:]]    ASCII (== [\x00-\x7F])
+	[[:blank:]]    blank (== [\t ])
+	[[:cntrl:]]    control (== [\x00-\x1F\x7F])
+	[[:digit:]]    digits (== [0-9])
+	[[:graph:]]    graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~])
+	[[:lower:]]    lower case (== [a-z])
+	[[:print:]]    printable (== [ -~] == [ [:graph:]])
+	[[:punct:]]    punctuation (== [!-/:-@[-`{-~])
+	[[:space:]]    whitespace (== [\t\n\v\f\r ])
+	[[:upper:]]    upper case (== [A-Z])
+	[[:word:]]     word characters (== [0-9A-Za-z_])
+	[[:xdigit:]]   hex digit (== [0-9A-Fa-f])
+
+Unicode character classes are those in [unicode.Categories] and [unicode.Scripts].
+*/
+package syntax
diff --git a/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl b/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl
new file mode 100644
index 0000000000000000000000000000000000000000..80a2c9ae6b9af5c57d8872dbe5448af1c13ce2dc
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl
@@ -0,0 +1,113 @@
+#!/usr/bin/perl
+# Copyright 2008 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Modified version of RE2's make_perl_groups.pl.
+
+# Generate table entries giving character ranges
+# for POSIX/Perl character classes.  Rather than
+# figure out what the definition is, it is easier to ask
+# Perl about each letter from 0-128 and write down
+# its answer.
+
+@posixclasses = (
+	"[:alnum:]",
+	"[:alpha:]",
+	"[:ascii:]",
+	"[:blank:]",
+	"[:cntrl:]",
+	"[:digit:]",
+	"[:graph:]",
+	"[:lower:]",
+	"[:print:]",
+	"[:punct:]",
+	"[:space:]",
+	"[:upper:]",
+	"[:word:]",
+	"[:xdigit:]",
+);
+
+@perlclasses = (
+	"\\d",
+	"\\s",
+	"\\w",
+);
+
+%overrides = (
+	# Prior to Perl 5.18, \s did not match vertical tab.
+	# RE2 preserves that original behaviour.
+	"\\s:11" => 0,
+);
+
+sub ComputeClass($) {
+  my @ranges;
+  my ($class) = @_;
+  my $regexp = "[$class]";
+  my $start = -1;
+  for (my $i=0; $i<=129; $i++) {
+    if ($i == 129) { $i = 256; }
+    if ($i <= 128 && ($overrides{"$class:$i"} // chr($i) =~ $regexp)) {
+      if ($start < 0) {
+        $start = $i;
+      }
+    } else {
+      if ($start >= 0) {
+        push @ranges, [$start, $i-1];
+      }
+      $start = -1;
+    }
+  }
+  return @ranges;
+}
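+
+# For example, ComputeClass("[:digit:]") asks Perl whether each of the
+# bytes 0..128 matches /[[:digit:]]/ and collapses the answers into
+# ranges, yielding the single pair (0x30, 0x39).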
+
+sub PrintClass($$@) {
+  my ($cname, $name, @ranges) = @_;
+  print "var code$cname = []rune{  /* $name */\n";
+  for (my $i=0; $i<@ranges; $i++) {
+    my @a = @{$ranges[$i]};
+    printf "\t0x%x, 0x%x,\n", $a[0], $a[1];
+  }
+  print "}\n\n";
+  my $n = @ranges;
+  $negname = $name;
+  if ($negname =~ /:/) {
+    $negname =~ s/:/:^/;
+  } else {
+    $negname =~ y/a-z/A-Z/;
+  }
+  return "\t`$name`: {+1, code$cname},\n" .
+  	"\t`$negname`: {-1, code$cname},\n";
+}
+
+my $gen = 0;
+
+sub PrintClasses($@) {
+  my ($cname, @classes) = @_;
+  my @entries;
+  foreach my $cl (@classes) {
+    my @ranges = ComputeClass($cl);
+    push @entries, PrintClass(++$gen, $cl, @ranges);
+  }
+  print "var ${cname}Group = map[string]charGroup{\n";
+  foreach my $e (@entries) {
+    print $e;
+  }
+  print "}\n";
+  my $count = @entries;
+}
+
+print <<EOF;
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GENERATED BY make_perl_groups.pl; DO NOT EDIT.
+// make_perl_groups.pl >perl_groups.go
+
+package syntax
+
+EOF
+
+PrintClasses("perl", @perlclasses);
+PrintClasses("posix", @posixclasses);
diff --git a/vendor/github.com/grafana/regexp/syntax/op_string.go b/vendor/github.com/grafana/regexp/syntax/op_string.go
new file mode 100644
index 0000000000000000000000000000000000000000..1368f5b7ea466b59d9eb6c471b7b899fb121cae1
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/op_string.go
@@ -0,0 +1,52 @@
+// Code generated by "stringer -type Op -trimprefix Op"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[OpNoMatch-1]
+	_ = x[OpEmptyMatch-2]
+	_ = x[OpLiteral-3]
+	_ = x[OpCharClass-4]
+	_ = x[OpAnyCharNotNL-5]
+	_ = x[OpAnyChar-6]
+	_ = x[OpBeginLine-7]
+	_ = x[OpEndLine-8]
+	_ = x[OpBeginText-9]
+	_ = x[OpEndText-10]
+	_ = x[OpWordBoundary-11]
+	_ = x[OpNoWordBoundary-12]
+	_ = x[OpCapture-13]
+	_ = x[OpStar-14]
+	_ = x[OpPlus-15]
+	_ = x[OpQuest-16]
+	_ = x[OpRepeat-17]
+	_ = x[OpConcat-18]
+	_ = x[OpAlternate-19]
+	_ = x[opPseudo-128]
+}
+
+const (
+	_Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate"
+	_Op_name_1 = "opPseudo"
+)
+
+var (
+	_Op_index_0 = [...]uint8{0, 7, 17, 24, 33, 45, 52, 61, 68, 77, 84, 96, 110, 117, 121, 125, 130, 136, 142, 151}
+)
+
+func (i Op) String() string {
+	switch {
+	case 1 <= i && i <= 19:
+		i -= 1
+		return _Op_name_0[_Op_index_0[i]:_Op_index_0[i+1]]
+	case i == 128:
+		return _Op_name_1
+	default:
+		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go
new file mode 100644
index 0000000000000000000000000000000000000000..6ed6491c807a2fa098bb2088eb73ece41aa19a6e
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/parse.go
@@ -0,0 +1,2136 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+	"sort"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// An Error describes a failure to parse a regular expression
+// and gives the offending expression.
+type Error struct {
+	Code ErrorCode
+	Expr string
+}
+
+func (e *Error) Error() string {
+	return "error parsing regexp: " + e.Code.String() + ": `" + e.Expr + "`"
+}
+
+// An ErrorCode describes a failure to parse a regular expression.
+type ErrorCode string
+
+const (
+	// Unexpected error
+	ErrInternalError ErrorCode = "regexp/syntax: internal error"
+
+	// Parse errors
+	ErrInvalidCharClass      ErrorCode = "invalid character class"
+	ErrInvalidCharRange      ErrorCode = "invalid character class range"
+	ErrInvalidEscape         ErrorCode = "invalid escape sequence"
+	ErrInvalidNamedCapture   ErrorCode = "invalid named capture"
+	ErrInvalidPerlOp         ErrorCode = "invalid or unsupported Perl syntax"
+	ErrInvalidRepeatOp       ErrorCode = "invalid nested repetition operator"
+	ErrInvalidRepeatSize     ErrorCode = "invalid repeat count"
+	ErrInvalidUTF8           ErrorCode = "invalid UTF-8"
+	ErrMissingBracket        ErrorCode = "missing closing ]"
+	ErrMissingParen          ErrorCode = "missing closing )"
+	ErrMissingRepeatArgument ErrorCode = "missing argument to repetition operator"
+	ErrTrailingBackslash     ErrorCode = "trailing backslash at end of expression"
+	ErrUnexpectedParen       ErrorCode = "unexpected )"
+	ErrNestingDepth          ErrorCode = "expression nests too deeply"
+	ErrLarge                 ErrorCode = "expression too large"
+)
+
+func (e ErrorCode) String() string {
+	return string(e)
+}
+
+// Flags control the behavior of the parser and record information about regexp context.
+type Flags uint16
+
+const (
+	FoldCase      Flags = 1 << iota // case-insensitive match
+	Literal                         // treat pattern as literal string
+	ClassNL                         // allow character classes like [^a-z] and [[:space:]] to match newline
+	DotNL                           // allow . to match newline
+	OneLine                         // treat ^ and $ as only matching at beginning and end of text
+	NonGreedy                       // make repetition operators default to non-greedy
+	PerlX                           // allow Perl extensions
+	UnicodeGroups                   // allow \p{Han}, \P{Han} for Unicode group and negation
+	WasDollar                       // regexp OpEndText was $, not \z
+	Simple                          // regexp contains no counted repetition
+
+	MatchNL = ClassNL | DotNL
+
+	Perl        = ClassNL | OneLine | PerlX | UnicodeGroups // as close to Perl as possible
+	POSIX Flags = 0                                         // POSIX syntax
+)
+
+// Pseudo-ops for parsing stack.
+const (
+	opLeftParen = opPseudo + iota
+	opVerticalBar
+)
+
+// maxHeight is the maximum height of a regexp parse tree.
+// It is somewhat arbitrarily chosen, but the idea is to be large enough
+// that no one will actually hit in real use but at the same time small enough
+// that recursion on the Regexp tree will not hit the 1GB Go stack limit.
+// The maximum amount of stack for a single recursive frame is probably
+// closer to 1kB, so this could potentially be raised, but it seems unlikely
+// that people have regexps nested even this deeply.
+// We ran a test on Google's C++ code base and turned up only
+// a single use case with depth > 100; it had depth 128.
+// Using depth 1000 should be plenty of margin.
+// As an optimization, we don't even bother calculating heights
+// until we've allocated at least maxHeight Regexp structures.
+const maxHeight = 1000
+
+// maxSize is the maximum size of a compiled regexp in Insts.
+// It too is somewhat arbitrarily chosen, but the idea is to be large enough
+// to allow significant regexps while at the same time small enough that
+// the compiled form will not take up too much memory.
+// 128 MB is enough for about 3.3 million Inst structures, which roughly
+// corresponds to a 3.3 MB regexp.
+const (
+	maxSize  = 128 << 20 / instSize
+	instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words
+)
+
+// maxRunes is the maximum number of runes allowed in a regexp tree
+// counting the runes in all the nodes.
+// Ignoring character classes, p.numRunes is always less than the length of the regexp.
+// Character classes can make it much larger: each \pL adds 1292 runes.
+// 128 MB is enough for 32M runes, which is over 26k \pL instances.
+// Note that repetitions do not make copies of the rune slices,
+// so \pL{1000} is only one rune slice, not 1000.
+// We could keep a cache of character classes we've seen,
+// so that all the \pL we see use the same rune list,
+// but that doesn't remove the problem entirely:
+// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()].
+// And because the Rune slice is exposed directly in the Regexp,
+// there is not an opportunity to change the representation to allow
+// partial sharing between different character classes.
+// So the limit is the best we can do.
+const (
+	maxRunes = 128 << 20 / runeSize
+	runeSize = 4 // rune is int32
+)
+
+type parser struct {
+	flags       Flags     // parse mode flags
+	stack       []*Regexp // stack of parsed expressions
+	free        *Regexp
+	numCap      int // number of capturing groups seen
+	wholeRegexp string
+	tmpClass    []rune            // temporary char class work space
+	numRegexp   int               // number of regexps allocated
+	numRunes    int               // number of runes in char classes
+	repeats     int64             // product of all repetitions seen
+	height      map[*Regexp]int   // regexp height, for height limit check
+	size        map[*Regexp]int64 // regexp compiled size, for size limit check
+}
+
+func (p *parser) newRegexp(op Op) *Regexp {
+	re := p.free
+	if re != nil {
+		p.free = re.Sub0[0]
+		*re = Regexp{}
+	} else {
+		re = new(Regexp)
+		p.numRegexp++
+	}
+	re.Op = op
+	return re
+}
+
+func (p *parser) reuse(re *Regexp) {
+	if p.height != nil {
+		delete(p.height, re)
+	}
+	re.Sub0[0] = p.free
+	p.free = re
+}
+
+func (p *parser) checkLimits(re *Regexp) {
+	if p.numRunes > maxRunes {
+		panic(ErrLarge)
+	}
+	p.checkSize(re)
+	p.checkHeight(re)
+}
+
+func (p *parser) checkSize(re *Regexp) {
+	if p.size == nil {
+		// We haven't started tracking size yet.
+		// Do a relatively cheap check to see if we need to start.
+		// Maintain the product of all the repeats we've seen
+		// and don't track if the total number of regexp nodes
+		// we've seen times the repeat product is in budget.
+		if p.repeats == 0 {
+			p.repeats = 1
+		}
+		if re.Op == OpRepeat {
+			n := re.Max
+			if n == -1 {
+				n = re.Min
+			}
+			if n <= 0 {
+				n = 1
+			}
+			if int64(n) > maxSize/p.repeats {
+				p.repeats = maxSize
+			} else {
+				p.repeats *= int64(n)
+			}
+		}
+		if int64(p.numRegexp) < maxSize/p.repeats {
+			return
+		}
+
+		// We need to start tracking size.
+		// Make the map and belatedly populate it
+		// with info about everything we've constructed so far.
+		p.size = make(map[*Regexp]int64)
+		for _, re := range p.stack {
+			p.checkSize(re)
+		}
+	}
+
+	if p.calcSize(re, true) > maxSize {
+		panic(ErrLarge)
+	}
+}
+
+func (p *parser) calcSize(re *Regexp, force bool) int64 {
+	if !force {
+		if size, ok := p.size[re]; ok {
+			return size
+		}
+	}
+
+	var size int64
+	switch re.Op {
+	case OpLiteral:
+		size = int64(len(re.Rune))
+	case OpCapture, OpStar:
+		// star can be 1+ or 2+; assume 2 pessimistically
+		size = 2 + p.calcSize(re.Sub[0], false)
+	case OpPlus, OpQuest:
+		size = 1 + p.calcSize(re.Sub[0], false)
+	case OpConcat:
+		for _, sub := range re.Sub {
+			size += p.calcSize(sub, false)
+		}
+	case OpAlternate:
+		for _, sub := range re.Sub {
+			size += p.calcSize(sub, false)
+		}
+		if len(re.Sub) > 1 {
+			size += int64(len(re.Sub)) - 1
+		}
+	case OpRepeat:
+		sub := p.calcSize(re.Sub[0], false)
+		if re.Max == -1 {
+			if re.Min == 0 {
+				size = 2 + sub // x*
+			} else {
+				size = 1 + int64(re.Min)*sub // xxx+
+			}
+			break
+		}
+		// x{2,5} = xx(x(x(x)?)?)?
+		size = int64(re.Max)*sub + int64(re.Max-re.Min)
+	}
+
+	size = max(1, size)
+	p.size[re] = size
+	return size
+}
+
+func (p *parser) checkHeight(re *Regexp) {
+	if p.numRegexp < maxHeight {
+		return
+	}
+	if p.height == nil {
+		p.height = make(map[*Regexp]int)
+		for _, re := range p.stack {
+			p.checkHeight(re)
+		}
+	}
+	if p.calcHeight(re, true) > maxHeight {
+		panic(ErrNestingDepth)
+	}
+}
+
+func (p *parser) calcHeight(re *Regexp, force bool) int {
+	if !force {
+		if h, ok := p.height[re]; ok {
+			return h
+		}
+	}
+	h := 1
+	for _, sub := range re.Sub {
+		hsub := p.calcHeight(sub, false)
+		if h < 1+hsub {
+			h = 1 + hsub
+		}
+	}
+	p.height[re] = h
+	return h
+}
+
+// Parse stack manipulation.
+
+// push pushes the regexp re onto the parse stack and returns the regexp.
+func (p *parser) push(re *Regexp) *Regexp {
+	p.numRunes += len(re.Rune)
+	if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] {
+		// Single rune.
+		if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) {
+			return nil
+		}
+		re.Op = OpLiteral
+		re.Rune = re.Rune[:1]
+		re.Flags = p.flags &^ FoldCase
+	} else if re.Op == OpCharClass && len(re.Rune) == 4 &&
+		re.Rune[0] == re.Rune[1] && re.Rune[2] == re.Rune[3] &&
+		unicode.SimpleFold(re.Rune[0]) == re.Rune[2] &&
+		unicode.SimpleFold(re.Rune[2]) == re.Rune[0] ||
+		re.Op == OpCharClass && len(re.Rune) == 2 &&
+			re.Rune[0]+1 == re.Rune[1] &&
+			unicode.SimpleFold(re.Rune[0]) == re.Rune[1] &&
+			unicode.SimpleFold(re.Rune[1]) == re.Rune[0] {
+		// Case-insensitive rune like [Aa] or [Δδ].
+		if p.maybeConcat(re.Rune[0], p.flags|FoldCase) {
+			return nil
+		}
+
+		// Rewrite as (case-insensitive) literal.
+		re.Op = OpLiteral
+		re.Rune = re.Rune[:1]
+		re.Flags = p.flags | FoldCase
+	} else {
+		// Incremental concatenation.
+		p.maybeConcat(-1, 0)
+	}
+
+	p.stack = append(p.stack, re)
+	p.checkLimits(re)
+	return re
+}
+
+// maybeConcat implements incremental concatenation
+// of literal runes into string nodes. The parser calls this
+// before each push, so only the top fragment of the stack
+// might need processing. Since this is called before a push,
+// the topmost literal is no longer subject to operators like *
+// (Otherwise ab* would turn into (ab)*.)
+// If r >= 0 and there's a node left over, maybeConcat uses it
+// to push r with the given flags.
+// maybeConcat reports whether r was pushed.
+func (p *parser) maybeConcat(r rune, flags Flags) bool {
+	n := len(p.stack)
+	if n < 2 {
+		return false
+	}
+
+	re1 := p.stack[n-1]
+	re2 := p.stack[n-2]
+	if re1.Op != OpLiteral || re2.Op != OpLiteral || re1.Flags&FoldCase != re2.Flags&FoldCase {
+		return false
+	}
+
+	// Push re1 into re2.
+	re2.Rune = append(re2.Rune, re1.Rune...)
+
+	// Reuse re1 if possible.
+	if r >= 0 {
+		re1.Rune = re1.Rune0[:1]
+		re1.Rune[0] = r
+		re1.Flags = flags
+		return true
+	}
+
+	p.stack = p.stack[:n-1]
+	p.reuse(re1)
+	return false // did not push r
+}
+
+// literal pushes a literal regexp for the rune r on the stack.
+func (p *parser) literal(r rune) {
+	re := p.newRegexp(OpLiteral)
+	re.Flags = p.flags
+	if p.flags&FoldCase != 0 {
+		r = minFoldRune(r)
+	}
+	re.Rune0[0] = r
+	re.Rune = re.Rune0[:1]
+	p.push(re)
+}
+
+// minFoldRune returns the minimum rune fold-equivalent to r.
+func minFoldRune(r rune) rune {
+	if r < minFold || r > maxFold {
+		return r
+	}
+	m := r
+	r0 := r
+	for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) {
+		m = min(m, r)
+	}
+	return m
+}
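+
+// For example, the simple fold orbit of 'k' is
+// 'K' (U+004B) -> 'k' (U+006B) -> KELVIN SIGN (U+212A) -> 'K',
+// so minFoldRune returns 'K' for all three runes.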
+
+// op pushes a regexp with the given op onto the stack
+// and returns that regexp.
+func (p *parser) op(op Op) *Regexp {
+	re := p.newRegexp(op)
+	re.Flags = p.flags
+	return p.push(re)
+}
+
+// repeat replaces the top stack element with itself repeated according to op, min, max.
+// before is the regexp suffix starting at the repetition operator.
+// after is the regexp suffix following after the repetition operator.
+// repeat returns an updated 'after' and an error, if any.
+func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (string, error) {
+	flags := p.flags
+	if p.flags&PerlX != 0 {
+		if len(after) > 0 && after[0] == '?' {
+			after = after[1:]
+			flags ^= NonGreedy
+		}
+		if lastRepeat != "" {
+			// In Perl it is not allowed to stack repetition operators:
+			// a** is a syntax error, not a doubled star, and a++ means
+			// something else entirely, which we don't support!
+			return "", &Error{ErrInvalidRepeatOp, lastRepeat[:len(lastRepeat)-len(after)]}
+		}
+	}
+	n := len(p.stack)
+	if n == 0 {
+		return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]}
+	}
+	sub := p.stack[n-1]
+	if sub.Op >= opPseudo {
+		return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]}
+	}
+
+	re := p.newRegexp(op)
+	re.Min = min
+	re.Max = max
+	re.Flags = flags
+	re.Sub = re.Sub0[:1]
+	re.Sub[0] = sub
+	p.stack[n-1] = re
+	p.checkLimits(re)
+
+	if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
+		return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
+	}
+
+	return after, nil
+}
+
+// repeatIsValid reports whether the repetition re is valid.
+// Valid means that the combination of the top-level repetition
+// and any inner repetitions does not exceed n copies of the
+// innermost thing.
+// This function rewalks the regexp tree and is called for every repetition,
+// so we have to worry about inducing quadratic behavior in the parser.
+// We avoid this by only calling repeatIsValid when min or max >= 2.
+// In that case the depth of any >= 2 nesting can only get to 9 without
+// triggering a parse error, so each subtree can only be rewalked 9 times.
+func repeatIsValid(re *Regexp, n int) bool {
+	if re.Op == OpRepeat {
+		m := re.Max
+		if m == 0 {
+			return true
+		}
+		if m < 0 {
+			m = re.Min
+		}
+		if m > n {
+			return false
+		}
+		if m > 0 {
+			n /= m
+		}
+	}
+	for _, sub := range re.Sub {
+		if !repeatIsValid(sub, n) {
+			return false
+		}
+	}
+	return true
+}
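+
+// For example, parsing (a{2}){1000} calls repeatIsValid on the outer
+// repetition with n = 1000: the {1000} reduces the budget to 1000/1000 = 1,
+// the inner {2} then exceeds it (2 > 1), and the parse fails with
+// ErrInvalidRepeatSize, since the expression would expand to 2000 copies of a.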
+
+// concat replaces the top of the stack (above the topmost '|' or '(') with its concatenation.
+func (p *parser) concat() *Regexp {
+	p.maybeConcat(-1, 0)
+
+	// Scan down to find pseudo-operator | or (.
+	i := len(p.stack)
+	for i > 0 && p.stack[i-1].Op < opPseudo {
+		i--
+	}
+	subs := p.stack[i:]
+	p.stack = p.stack[:i]
+
+	// Empty concatenation is a special case.
+	if len(subs) == 0 {
+		return p.push(p.newRegexp(OpEmptyMatch))
+	}
+
+	return p.push(p.collapse(subs, OpConcat))
+}
+
+// alternate replaces the top of the stack (above the topmost '(') with its alternation.
+func (p *parser) alternate() *Regexp {
+	// Scan down to find pseudo-operator (.
+	// There are no | above (.
+	i := len(p.stack)
+	for i > 0 && p.stack[i-1].Op < opPseudo {
+		i--
+	}
+	subs := p.stack[i:]
+	p.stack = p.stack[:i]
+
+	// Make sure top class is clean.
+	// All the others already are (see swapVerticalBar).
+	if len(subs) > 0 {
+		cleanAlt(subs[len(subs)-1])
+	}
+
+	// Empty alternate is a special case
+	// (shouldn't happen but easy to handle).
+	if len(subs) == 0 {
+		return p.push(p.newRegexp(OpNoMatch))
+	}
+
+	return p.push(p.collapse(subs, OpAlternate))
+}
+
+// cleanAlt cleans re for eventual inclusion in an alternation.
+func cleanAlt(re *Regexp) {
+	switch re.Op {
+	case OpCharClass:
+		re.Rune = cleanClass(&re.Rune)
+		if len(re.Rune) == 2 && re.Rune[0] == 0 && re.Rune[1] == unicode.MaxRune {
+			re.Rune = nil
+			re.Op = OpAnyChar
+			return
+		}
+		if len(re.Rune) == 4 && re.Rune[0] == 0 && re.Rune[1] == '\n'-1 && re.Rune[2] == '\n'+1 && re.Rune[3] == unicode.MaxRune {
+			re.Rune = nil
+			re.Op = OpAnyCharNotNL
+			return
+		}
+		if cap(re.Rune)-len(re.Rune) > 100 {
+			// re.Rune will not grow any more.
+			// Make a copy or inline to reclaim storage.
+			re.Rune = append(re.Rune0[:0], re.Rune...)
+		}
+	}
+}
+
+// collapse returns the result of applying op to sub.
+// If sub contains op nodes, they all get hoisted up
+// so that there is never a concat of a concat or an
+// alternate of an alternate.
+func (p *parser) collapse(subs []*Regexp, op Op) *Regexp {
+	if len(subs) == 1 {
+		return subs[0]
+	}
+	re := p.newRegexp(op)
+	re.Sub = re.Sub0[:0]
+	for _, sub := range subs {
+		if sub.Op == op {
+			re.Sub = append(re.Sub, sub.Sub...)
+			p.reuse(sub)
+		} else {
+			re.Sub = append(re.Sub, sub)
+		}
+	}
+	if op == OpAlternate {
+		re.Sub = p.factor(re.Sub)
+		if len(re.Sub) == 1 {
+			old := re
+			re = re.Sub[0]
+			p.reuse(old)
+		}
+	}
+	return re
+}
+
+// factor factors common prefixes from the alternation list sub.
+// It returns a replacement list that reuses the same storage and
+// frees (passes to p.reuse) any removed *Regexps.
+//
+// For example,
+//
+//	ABC|ABD|AEF|BCX|BCY
+//
+// simplifies by literal prefix extraction to
+//
+//	A(B(C|D)|EF)|BC(X|Y)
+//
+// which simplifies by character class introduction to
+//
+//	A(B[CD]|EF)|BC[XY]
+func (p *parser) factor(sub []*Regexp) []*Regexp {
+	if len(sub) < 2 {
+		return sub
+	}
+
+	// Round 1: Factor out common literal prefixes.
+	var str []rune
+	var strflags Flags
+	start := 0
+	out := sub[:0]
+	for i := 0; i <= len(sub); i++ {
+		// Invariant: the Regexps that were in sub[0:start] have been
+		// used or marked for reuse, and the slice space has been reused
+		// for out (len(out) <= start).
+		//
+		// Invariant: sub[start:i] consists of regexps that all begin
+		// with str as modified by strflags.
+		var istr []rune
+		var iflags Flags
+		if i < len(sub) {
+			istr, iflags = p.leadingString(sub[i])
+			if iflags == strflags {
+				same := 0
+				for same < len(str) && same < len(istr) && str[same] == istr[same] {
+					same++
+				}
+				if same > 0 {
+					// Matches at least one rune in current range.
+					// Keep going around.
+					str = str[:same]
+					continue
+				}
+			}
+		}
+
+		// Found end of a run with common leading literal string:
+		// sub[start:i] all begin with str[0:len(str)], but sub[i]
+		// does not even begin with str[0].
+		//
+		// Factor out common string and append factored expression to out.
+		if i == start {
+			// Nothing to do - run of length 0.
+		} else if i == start+1 {
+			// Just one: don't bother factoring.
+			out = append(out, sub[start])
+		} else {
+			// Construct factored form: prefix(suffix1|suffix2|...)
+			prefix := p.newRegexp(OpLiteral)
+			prefix.Flags = strflags
+			prefix.Rune = append(prefix.Rune[:0], str...)
+
+			for j := start; j < i; j++ {
+				sub[j] = p.removeLeadingString(sub[j], len(str))
+				p.checkLimits(sub[j])
+			}
+			suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+			re := p.newRegexp(OpConcat)
+			re.Sub = append(re.Sub[:0], prefix, suffix)
+			out = append(out, re)
+		}
+
+		// Prepare for next iteration.
+		start = i
+		str = istr
+		strflags = iflags
+	}
+	sub = out
+
+	// Round 2: Factor out common simple prefixes,
+	// just the first piece of each concatenation.
+	// This will be good enough a lot of the time.
+	//
+	// Complex subexpressions (e.g. involving quantifiers)
+	// are not safe to factor because that collapses their
+	// distinct paths through the automaton, which affects
+	// correctness in some cases.
+	start = 0
+	out = sub[:0]
+	var first *Regexp
+	for i := 0; i <= len(sub); i++ {
+		// Invariant: the Regexps that were in sub[0:start] have been
+		// used or marked for reuse, and the slice space has been reused
+		// for out (len(out) <= start).
+		//
+		// Invariant: sub[start:i] consists of regexps that all begin with ifirst.
+		var ifirst *Regexp
+		if i < len(sub) {
+			ifirst = p.leadingRegexp(sub[i])
+			if first != nil && first.Equal(ifirst) &&
+				// first must be a character class OR a fixed repeat of a character class.
+				(isCharClass(first) || (first.Op == OpRepeat && first.Min == first.Max && isCharClass(first.Sub[0]))) {
+				continue
+			}
+		}
+
+		// Found end of a run with common leading regexp:
+		// sub[start:i] all begin with first but sub[i] does not.
+		//
+		// Factor out common regexp and append factored expression to out.
+		if i == start {
+			// Nothing to do - run of length 0.
+		} else if i == start+1 {
+			// Just one: don't bother factoring.
+			out = append(out, sub[start])
+		} else {
+			// Construct factored form: prefix(suffix1|suffix2|...)
+			prefix := first
+			for j := start; j < i; j++ {
+				reuse := j != start // prefix came from sub[start]
+				sub[j] = p.removeLeadingRegexp(sub[j], reuse)
+				p.checkLimits(sub[j])
+			}
+			suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+			re := p.newRegexp(OpConcat)
+			re.Sub = append(re.Sub[:0], prefix, suffix)
+			out = append(out, re)
+		}
+
+		// Prepare for next iteration.
+		start = i
+		first = ifirst
+	}
+	sub = out
+
+	// Round 3: Collapse runs of single literals into character classes.
+	start = 0
+	out = sub[:0]
+	for i := 0; i <= len(sub); i++ {
+		// Invariant: the Regexps that were in sub[0:start] have been
+		// used or marked for reuse, and the slice space has been reused
+		// for out (len(out) <= start).
+		//
+		// Invariant: sub[start:i] consists of regexps that are either
+		// literal runes or character classes.
+		if i < len(sub) && isCharClass(sub[i]) {
+			continue
+		}
+
+		// sub[i] is not a char or char class;
+		// emit char class for sub[start:i]...
+		if i == start {
+			// Nothing to do - run of length 0.
+		} else if i == start+1 {
+			out = append(out, sub[start])
+		} else {
+			// Make new char class.
+			// Start with most complex regexp in sub[start].
+			max := start
+			for j := start + 1; j < i; j++ {
+				if sub[max].Op < sub[j].Op || sub[max].Op == sub[j].Op && len(sub[max].Rune) < len(sub[j].Rune) {
+					max = j
+				}
+			}
+			sub[start], sub[max] = sub[max], sub[start]
+
+			for j := start + 1; j < i; j++ {
+				mergeCharClass(sub[start], sub[j])
+				p.reuse(sub[j])
+			}
+			cleanAlt(sub[start])
+			out = append(out, sub[start])
+		}
+
+		// ... and then emit sub[i].
+		if i < len(sub) {
+			out = append(out, sub[i])
+		}
+		start = i + 1
+	}
+	sub = out
+
+	// Round 4: Collapse runs of empty matches into a single empty match.
+	start = 0
+	out = sub[:0]
+	for i := range sub {
+		if i+1 < len(sub) && sub[i].Op == OpEmptyMatch && sub[i+1].Op == OpEmptyMatch {
+			continue
+		}
+		out = append(out, sub[i])
+	}
+	sub = out
+
+	return sub
+}
+
+// leadingString returns the leading literal string that re begins with.
+// The string refers to storage in re or its children.
+func (p *parser) leadingString(re *Regexp) ([]rune, Flags) {
+	if re.Op == OpConcat && len(re.Sub) > 0 {
+		re = re.Sub[0]
+	}
+	if re.Op != OpLiteral {
+		return nil, 0
+	}
+	return re.Rune, re.Flags & FoldCase
+}
+
+// removeLeadingString removes the first n leading runes
+// from the beginning of re. It returns the replacement for re.
+func (p *parser) removeLeadingString(re *Regexp, n int) *Regexp {
+	if re.Op == OpConcat && len(re.Sub) > 0 {
+		// Removing a leading string in a concatenation
+		// might simplify the concatenation.
+		sub := re.Sub[0]
+		sub = p.removeLeadingString(sub, n)
+		re.Sub[0] = sub
+		if sub.Op == OpEmptyMatch {
+			p.reuse(sub)
+			switch len(re.Sub) {
+			case 0, 1:
+				// Impossible but handle.
+				re.Op = OpEmptyMatch
+				re.Sub = nil
+			case 2:
+				old := re
+				re = re.Sub[1]
+				p.reuse(old)
+			default:
+				copy(re.Sub, re.Sub[1:])
+				re.Sub = re.Sub[:len(re.Sub)-1]
+			}
+		}
+		return re
+	}
+
+	if re.Op == OpLiteral {
+		re.Rune = re.Rune[:copy(re.Rune, re.Rune[n:])]
+		if len(re.Rune) == 0 {
+			re.Op = OpEmptyMatch
+		}
+	}
+	return re
+}
+
+// leadingRegexp returns the leading regexp that re begins with.
+// The regexp refers to storage in re or its children.
+func (p *parser) leadingRegexp(re *Regexp) *Regexp {
+	if re.Op == OpEmptyMatch {
+		return nil
+	}
+	if re.Op == OpConcat && len(re.Sub) > 0 {
+		sub := re.Sub[0]
+		if sub.Op == OpEmptyMatch {
+			return nil
+		}
+		return sub
+	}
+	return re
+}
+
+// removeLeadingRegexp removes the leading regexp in re.
+// It returns the replacement for re.
+// If reuse is true, it passes the removed regexp (if no longer needed) to p.reuse.
+func (p *parser) removeLeadingRegexp(re *Regexp, reuse bool) *Regexp {
+	if re.Op == OpConcat && len(re.Sub) > 0 {
+		if reuse {
+			p.reuse(re.Sub[0])
+		}
+		re.Sub = re.Sub[:copy(re.Sub, re.Sub[1:])]
+		switch len(re.Sub) {
+		case 0:
+			re.Op = OpEmptyMatch
+			re.Sub = nil
+		case 1:
+			old := re
+			re = re.Sub[0]
+			p.reuse(old)
+		}
+		return re
+	}
+	if reuse {
+		p.reuse(re)
+	}
+	return p.newRegexp(OpEmptyMatch)
+}
+
+func literalRegexp(s string, flags Flags) *Regexp {
+	re := &Regexp{Op: OpLiteral}
+	re.Flags = flags
+	re.Rune = re.Rune0[:0] // use local storage for small strings
+	for _, c := range s {
+		if len(re.Rune) >= cap(re.Rune) {
+			// String is too long to fit in Rune0; let Go handle it.
+			re.Rune = []rune(s)
+			break
+		}
+		re.Rune = append(re.Rune, c)
+	}
+	return re
+}
+
+// Parsing.
+
+// Parse parses a regular expression string s, controlled by the specified
+// Flags, and returns a regular expression parse tree. The syntax is
+// described in the top-level comment.
+func Parse(s string, flags Flags) (*Regexp, error) {
+	return parse(s, flags)
+}
+
+func parse(s string, flags Flags) (_ *Regexp, err error) {
+	defer func() {
+		switch r := recover(); r {
+		default:
+			panic(r)
+		case nil:
+			// ok
+		case ErrLarge: // too big
+			err = &Error{Code: ErrLarge, Expr: s}
+		case ErrNestingDepth:
+			err = &Error{Code: ErrNestingDepth, Expr: s}
+		}
+	}()
+
+	if flags&Literal != 0 {
+		// Trivial parser for literal string.
+		if err := checkUTF8(s); err != nil {
+			return nil, err
+		}
+		return literalRegexp(s, flags), nil
+	}
+
+	// Otherwise, must do real work.
+	var (
+		p          parser
+		c          rune
+		op         Op
+		lastRepeat string
+	)
+	p.flags = flags
+	p.wholeRegexp = s
+	t := s
+	for t != "" {
+		repeat := ""
+	BigSwitch:
+		switch t[0] {
+		default:
+			if c, t, err = nextRune(t); err != nil {
+				return nil, err
+			}
+			p.literal(c)
+
+		case '(':
+			if p.flags&PerlX != 0 && len(t) >= 2 && t[1] == '?' {
+				// Flag changes and non-capturing groups.
+				if t, err = p.parsePerlFlags(t); err != nil {
+					return nil, err
+				}
+				break
+			}
+			p.numCap++
+			p.op(opLeftParen).Cap = p.numCap
+			t = t[1:]
+		case '|':
+			if err = p.parseVerticalBar(); err != nil {
+				return nil, err
+			}
+			t = t[1:]
+		case ')':
+			if err = p.parseRightParen(); err != nil {
+				return nil, err
+			}
+			t = t[1:]
+		case '^':
+			if p.flags&OneLine != 0 {
+				p.op(OpBeginText)
+			} else {
+				p.op(OpBeginLine)
+			}
+			t = t[1:]
+		case '$':
+			if p.flags&OneLine != 0 {
+				p.op(OpEndText).Flags |= WasDollar
+			} else {
+				p.op(OpEndLine)
+			}
+			t = t[1:]
+		case '.':
+			if p.flags&DotNL != 0 {
+				p.op(OpAnyChar)
+			} else {
+				p.op(OpAnyCharNotNL)
+			}
+			t = t[1:]
+		case '[':
+			if t, err = p.parseClass(t); err != nil {
+				return nil, err
+			}
+		case '*', '+', '?':
+			before := t
+			switch t[0] {
+			case '*':
+				op = OpStar
+			case '+':
+				op = OpPlus
+			case '?':
+				op = OpQuest
+			}
+			after := t[1:]
+			if after, err = p.repeat(op, 0, 0, before, after, lastRepeat); err != nil {
+				return nil, err
+			}
+			repeat = before
+			t = after
+		case '{':
+			op = OpRepeat
+			before := t
+			min, max, after, ok := p.parseRepeat(t)
+			if !ok {
+				// If the repeat cannot be parsed, { is a literal.
+				p.literal('{')
+				t = t[1:]
+				break
+			}
+			if min < 0 || min > 1000 || max > 1000 || max >= 0 && min > max {
+				// Numbers were too big, or max is present and min > max.
+				return nil, &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
+			}
+			if after, err = p.repeat(op, min, max, before, after, lastRepeat); err != nil {
+				return nil, err
+			}
+			repeat = before
+			t = after
+		case '\\':
+			if p.flags&PerlX != 0 && len(t) >= 2 {
+				switch t[1] {
+				case 'A':
+					p.op(OpBeginText)
+					t = t[2:]
+					break BigSwitch
+				case 'b':
+					p.op(OpWordBoundary)
+					t = t[2:]
+					break BigSwitch
+				case 'B':
+					p.op(OpNoWordBoundary)
+					t = t[2:]
+					break BigSwitch
+				case 'C':
+					// any byte; not supported
+					return nil, &Error{ErrInvalidEscape, t[:2]}
+				case 'Q':
+					// \Q ... \E: the ... is always literals
+					var lit string
+					lit, t, _ = strings.Cut(t[2:], `\E`)
+					for lit != "" {
+						c, rest, err := nextRune(lit)
+						if err != nil {
+							return nil, err
+						}
+						p.literal(c)
+						lit = rest
+					}
+					break BigSwitch
+				case 'z':
+					p.op(OpEndText)
+					t = t[2:]
+					break BigSwitch
+				}
+			}
+
+			re := p.newRegexp(OpCharClass)
+			re.Flags = p.flags
+
+			// Look for Unicode character group like \p{Han}
+			if len(t) >= 2 && (t[1] == 'p' || t[1] == 'P') {
+				r, rest, err := p.parseUnicodeClass(t, re.Rune0[:0])
+				if err != nil {
+					return nil, err
+				}
+				if r != nil {
+					re.Rune = r
+					t = rest
+					p.push(re)
+					break BigSwitch
+				}
+			}
+
+			// Perl character class escape.
+			if r, rest := p.parsePerlClassEscape(t, re.Rune0[:0]); r != nil {
+				re.Rune = r
+				t = rest
+				p.push(re)
+				break BigSwitch
+			}
+			p.reuse(re)
+
+			// Ordinary single-character escape.
+			if c, t, err = p.parseEscape(t); err != nil {
+				return nil, err
+			}
+			p.literal(c)
+		}
+		lastRepeat = repeat
+	}
+
+	p.concat()
+	if p.swapVerticalBar() {
+		// pop vertical bar
+		p.stack = p.stack[:len(p.stack)-1]
+	}
+	p.alternate()
+
+	n := len(p.stack)
+	if n != 1 {
+		return nil, &Error{ErrMissingParen, s}
+	}
+	return p.stack[0], nil
+}
+
+// parseRepeat parses {min} (max=min) or {min,} (max=-1) or {min,max}.
+// If s is not of that form, it returns ok == false.
+// If s has the right form but the values are too big, it returns min == -1, ok == true.
+func (p *parser) parseRepeat(s string) (min, max int, rest string, ok bool) {
+	if s == "" || s[0] != '{' {
+		return
+	}
+	s = s[1:]
+	var ok1 bool
+	if min, s, ok1 = p.parseInt(s); !ok1 {
+		return
+	}
+	if s == "" {
+		return
+	}
+	if s[0] != ',' {
+		max = min
+	} else {
+		s = s[1:]
+		if s == "" {
+			return
+		}
+		if s[0] == '}' {
+			max = -1
+		} else if max, s, ok1 = p.parseInt(s); !ok1 {
+			return
+		} else if max < 0 {
+			// parseInt found too big a number
+			min = -1
+		}
+	}
+	if s == "" || s[0] != '}' {
+		return
+	}
+	rest = s[1:]
+	ok = true
+	return
+}
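+
+// For example, parseRepeat("{2,5}y") returns (2, 5, "y", true),
+// parseRepeat("{3}y") returns (3, 3, "y", true), and
+// parseRepeat("{2,}y") returns (2, -1, "y", true).
+// parseRepeat("{,5}y") returns ok == false, so the caller
+// falls back to treating the { as a literal.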
+
+// parsePerlFlags parses a Perl flag setting or non-capturing group or both,
+// like (?i) or (?: or (?i:.  It removes the prefix from s and updates the parse state.
+// The caller must have ensured that s begins with "(?".
+func (p *parser) parsePerlFlags(s string) (rest string, err error) {
+	t := s
+
+	// Check for named captures, first introduced in Python's regexp library.
+	// As usual, there are three slightly different syntaxes:
+	//
+	//   (?P<name>expr)   the original, introduced by Python
+	//   (?<name>expr)    the .NET alteration, adopted by Perl 5.10
+	//   (?'name'expr)    another .NET alteration, adopted by Perl 5.10
+	//
+	// Perl 5.10 gave in and implemented the Python version too,
+	// but they claim that the last two are the preferred forms.
+	// PCRE and languages based on it (specifically, PHP and Ruby)
+	// support all three as well. EcmaScript 4 uses only the Python form.
+	//
+	// In both the open source world (via Code Search) and the
+	// Google source tree, (?P<name>expr) and (?<name>expr) are the
+	// dominant forms of named captures and both are supported.
+	startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<'
+	startsWithName := len(t) > 3 && t[2] == '<'
+
+	if startsWithP || startsWithName {
+		// position of expr start
+		exprStartPos := 4
+		if startsWithName {
+			exprStartPos = 3
+		}
+
+		// Pull out name.
+		end := strings.IndexRune(t, '>')
+		if end < 0 {
+			if err = checkUTF8(t); err != nil {
+				return "", err
+			}
+			return "", &Error{ErrInvalidNamedCapture, s}
+		}
+
+		capture := t[:end+1]        // "(?P<name>" or "(?<name>"
+		name := t[exprStartPos:end] // "name"
+		if err = checkUTF8(name); err != nil {
+			return "", err
+		}
+		if !isValidCaptureName(name) {
+			return "", &Error{ErrInvalidNamedCapture, capture}
+		}
+
+		// Like ordinary capture, but named.
+		p.numCap++
+		re := p.op(opLeftParen)
+		re.Cap = p.numCap
+		re.Name = name
+		return t[end+1:], nil
+	}
+
+	// Non-capturing group. Might also twiddle Perl flags.
+	var c rune
+	t = t[2:] // skip (?
+	flags := p.flags
+	sign := +1
+	sawFlag := false
+Loop:
+	for t != "" {
+		if c, t, err = nextRune(t); err != nil {
+			return "", err
+		}
+		switch c {
+		default:
+			break Loop
+
+		// Flags.
+		case 'i':
+			flags |= FoldCase
+			sawFlag = true
+		case 'm':
+			flags &^= OneLine
+			sawFlag = true
+		case 's':
+			flags |= DotNL
+			sawFlag = true
+		case 'U':
+			flags |= NonGreedy
+			sawFlag = true
+
+		// Switch to negation.
+		case '-':
+			if sign < 0 {
+				break Loop
+			}
+			sign = -1
+			// Invert flags so that the | operations above turn into &^ and vice versa.
+			// We'll invert flags again before using it below.
+			flags = ^flags
+			sawFlag = false
+
+		// End of flags, starting group or not.
+		case ':', ')':
+			if sign < 0 {
+				if !sawFlag {
+					break Loop
+				}
+				flags = ^flags
+			}
+			if c == ':' {
+				// Open new group
+				p.op(opLeftParen)
+			}
+			p.flags = flags
+			return t, nil
+		}
+	}
+
+	return "", &Error{ErrInvalidPerlOp, s[:len(s)-len(t)]}
+}
+
+// isValidCaptureName reports whether name
+// is a valid capture name: [A-Za-z0-9_]+.
+// PCRE limits names to 32 bytes.
+// Python rejects names starting with digits.
+// We don't enforce either of those.
+func isValidCaptureName(name string) bool {
+	if name == "" {
+		return false
+	}
+	for _, c := range name {
+		if c != '_' && !isalnum(c) {
+			return false
+		}
+	}
+	return true
+}
+
+// parseInt parses a decimal integer.
+func (p *parser) parseInt(s string) (n int, rest string, ok bool) {
+	if s == "" || s[0] < '0' || '9' < s[0] {
+		return
+	}
+	// Disallow leading zeros.
+	if len(s) >= 2 && s[0] == '0' && '0' <= s[1] && s[1] <= '9' {
+		return
+	}
+	t := s
+	for s != "" && '0' <= s[0] && s[0] <= '9' {
+		s = s[1:]
+	}
+	rest = s
+	ok = true
+	// Have digits, compute value.
+	t = t[:len(t)-len(s)]
+	for i := 0; i < len(t); i++ {
+		// Avoid overflow.
+		if n >= 1e8 {
+			n = -1
+			break
+		}
+		n = n*10 + int(t[i]) - '0'
+	}
+	return
+}
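+
+// For example, parseInt("123}") returns (123, "}", true), while
+// parseInt("0123}") returns ok == false because leading zeros are
+// disallowed. A ten-digit input such as "9999999999" comes back with
+// n == -1, which the callers reject as an invalid repeat count.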
+
+// can this be represented as a character class?
+// single-rune literal string, char class, ., and .|\n.
+func isCharClass(re *Regexp) bool {
+	return re.Op == OpLiteral && len(re.Rune) == 1 ||
+		re.Op == OpCharClass ||
+		re.Op == OpAnyCharNotNL ||
+		re.Op == OpAnyChar
+}
+
+// does re match r?
+func matchRune(re *Regexp, r rune) bool {
+	switch re.Op {
+	case OpLiteral:
+		return len(re.Rune) == 1 && re.Rune[0] == r
+	case OpCharClass:
+		for i := 0; i < len(re.Rune); i += 2 {
+			if re.Rune[i] <= r && r <= re.Rune[i+1] {
+				return true
+			}
+		}
+		return false
+	case OpAnyCharNotNL:
+		return r != '\n'
+	case OpAnyChar:
+		return true
+	}
+	return false
+}
+
+// parseVerticalBar handles a | in the input.
+func (p *parser) parseVerticalBar() error {
+	p.concat()
+
+	// The concatenation we just parsed is on top of the stack.
+	// If it sits above an opVerticalBar, swap it below
+	// (things below an opVerticalBar become an alternation).
+	// Otherwise, push a new vertical bar.
+	if !p.swapVerticalBar() {
+		p.op(opVerticalBar)
+	}
+
+	return nil
+}
+
+// mergeCharClass makes dst = dst|src.
+// The caller must ensure that dst.Op >= src.Op,
+// to reduce the amount of copying.
+func mergeCharClass(dst, src *Regexp) {
+	switch dst.Op {
+	case OpAnyChar:
+		// src doesn't add anything.
+	case OpAnyCharNotNL:
+		// src might add \n
+		if matchRune(src, '\n') {
+			dst.Op = OpAnyChar
+		}
+	case OpCharClass:
+		// src is simpler, so either literal or char class
+		if src.Op == OpLiteral {
+			dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags)
+		} else {
+			dst.Rune = appendClass(dst.Rune, src.Rune)
+		}
+	case OpLiteral:
+		// both literal
+		if src.Rune[0] == dst.Rune[0] && src.Flags == dst.Flags {
+			break
+		}
+		dst.Op = OpCharClass
+		dst.Rune = appendLiteral(dst.Rune[:0], dst.Rune[0], dst.Flags)
+		dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags)
+	}
+}
+
+// If the top of the stack is an element followed by an opVerticalBar
+// swapVerticalBar swaps the two and returns true.
+// Otherwise it returns false.
+func (p *parser) swapVerticalBar() bool {
+	// If above and below vertical bar are literal or char class,
+	// can merge into a single char class.
+	n := len(p.stack)
+	if n >= 3 && p.stack[n-2].Op == opVerticalBar && isCharClass(p.stack[n-1]) && isCharClass(p.stack[n-3]) {
+		re1 := p.stack[n-1]
+		re3 := p.stack[n-3]
+		// Make re3 the more complex of the two.
+		if re1.Op > re3.Op {
+			re1, re3 = re3, re1
+			p.stack[n-3] = re3
+		}
+		mergeCharClass(re3, re1)
+		p.reuse(re1)
+		p.stack = p.stack[:n-1]
+		return true
+	}
+
+	if n >= 2 {
+		re1 := p.stack[n-1]
+		re2 := p.stack[n-2]
+		if re2.Op == opVerticalBar {
+			if n >= 3 {
+				// Now out of reach.
+				// Clean opportunistically.
+				cleanAlt(p.stack[n-3])
+			}
+			p.stack[n-2] = re1
+			p.stack[n-1] = re2
+			return true
+		}
+	}
+	return false
+}
+
+// parseRightParen handles a ) in the input.
+func (p *parser) parseRightParen() error {
+	p.concat()
+	if p.swapVerticalBar() {
+		// pop vertical bar
+		p.stack = p.stack[:len(p.stack)-1]
+	}
+	p.alternate()
+
+	n := len(p.stack)
+	if n < 2 {
+		return &Error{ErrUnexpectedParen, p.wholeRegexp}
+	}
+	re1 := p.stack[n-1]
+	re2 := p.stack[n-2]
+	p.stack = p.stack[:n-2]
+	if re2.Op != opLeftParen {
+		return &Error{ErrUnexpectedParen, p.wholeRegexp}
+	}
+	// Restore flags at time of paren.
+	p.flags = re2.Flags
+	if re2.Cap == 0 {
+		// Just for grouping.
+		p.push(re1)
+	} else {
+		re2.Op = OpCapture
+		re2.Sub = re2.Sub0[:1]
+		re2.Sub[0] = re1
+		p.push(re2)
+	}
+	return nil
+}
+
+// parseEscape parses an escape sequence at the beginning of s
+// and returns the rune.
+func (p *parser) parseEscape(s string) (r rune, rest string, err error) {
+	t := s[1:]
+	if t == "" {
+		return 0, "", &Error{ErrTrailingBackslash, ""}
+	}
+	c, t, err := nextRune(t)
+	if err != nil {
+		return 0, "", err
+	}
+
+Switch:
+	switch c {
+	default:
+		if c < utf8.RuneSelf && !isalnum(c) {
+			// Escaped non-word characters are always themselves.
+			// PCRE is not quite so rigorous: it accepts things like
+			// \q, but we don't. We once rejected \_, but too many
+			// programs and people insist on using it, so allow \_.
+			return c, t, nil
+		}
+
+	// Octal escapes.
+	case '1', '2', '3', '4', '5', '6', '7':
+		// Single non-zero digit is a backreference; not supported
+		if t == "" || t[0] < '0' || t[0] > '7' {
+			break
+		}
+		fallthrough
+	case '0':
+		// Consume up to three octal digits; already have one.
+		r = c - '0'
+		for i := 1; i < 3; i++ {
+			if t == "" || t[0] < '0' || t[0] > '7' {
+				break
+			}
+			r = r*8 + rune(t[0]) - '0'
+			t = t[1:]
+		}
+		return r, t, nil
+
+	// Hexadecimal escapes.
+	case 'x':
+		if t == "" {
+			break
+		}
+		if c, t, err = nextRune(t); err != nil {
+			return 0, "", err
+		}
+		if c == '{' {
+			// Any number of digits in braces.
+			// Perl accepts any text at all; it ignores all text
+			// after the first non-hex digit. We require only hex digits,
+			// and at least one.
+			nhex := 0
+			r = 0
+			for {
+				if t == "" {
+					break Switch
+				}
+				if c, t, err = nextRune(t); err != nil {
+					return 0, "", err
+				}
+				if c == '}' {
+					break
+				}
+				v := unhex(c)
+				if v < 0 {
+					break Switch
+				}
+				r = r*16 + v
+				if r > unicode.MaxRune {
+					break Switch
+				}
+				nhex++
+			}
+			if nhex == 0 {
+				break Switch
+			}
+			return r, t, nil
+		}
+
+		// Easy case: two hex digits.
+		x := unhex(c)
+		if c, t, err = nextRune(t); err != nil {
+			return 0, "", err
+		}
+		y := unhex(c)
+		if x < 0 || y < 0 {
+			break
+		}
+		return x*16 + y, t, nil
+
+	// C escapes. There is no case 'b', to avoid misparsing
+	// the Perl word-boundary \b as the C backspace \b
+	// when in POSIX mode. In Perl, /\b/ means word-boundary
+	// but /[\b]/ means backspace. We don't support that.
+	// If you want a backspace, embed a literal backspace
+	// character or use \x08.
+	case 'a':
+		return '\a', t, err
+	case 'f':
+		return '\f', t, err
+	case 'n':
+		return '\n', t, err
+	case 'r':
+		return '\r', t, err
+	case 't':
+		return '\t', t, err
+	case 'v':
+		return '\v', t, err
+	}
+	return 0, "", &Error{ErrInvalidEscape, s[:len(s)-len(t)]}
+}
+
+// parseClassChar parses a character class character at the beginning of s
+// and returns it.
+func (p *parser) parseClassChar(s, wholeClass string) (r rune, rest string, err error) {
+	if s == "" {
+		return 0, "", &Error{Code: ErrMissingBracket, Expr: wholeClass}
+	}
+
+	// Allow regular escape sequences even though
+	// many need not be escaped in this context.
+	if s[0] == '\\' {
+		return p.parseEscape(s)
+	}
+
+	return nextRune(s)
+}
+
+type charGroup struct {
+	sign  int
+	class []rune
+}
+
+// parsePerlClassEscape parses a leading Perl character class escape like \d
+// from the beginning of s. If one is present, it appends the characters to r
+// and returns the new slice r and the remainder of the string.
+func (p *parser) parsePerlClassEscape(s string, r []rune) (out []rune, rest string) {
+	if p.flags&PerlX == 0 || len(s) < 2 || s[0] != '\\' {
+		return
+	}
+	g := perlGroup[s[0:2]]
+	if g.sign == 0 {
+		return
+	}
+	return p.appendGroup(r, g), s[2:]
+}
+
+// parseNamedClass parses a leading POSIX named character class like [:alnum:]
+// from the beginning of s. If one is present, it appends the characters to r
+// and returns the new slice r and the remainder of the string.
+func (p *parser) parseNamedClass(s string, r []rune) (out []rune, rest string, err error) {
+	if len(s) < 2 || s[0] != '[' || s[1] != ':' {
+		return
+	}
+
+	i := strings.Index(s[2:], ":]")
+	if i < 0 {
+		return
+	}
+	i += 2
+	name, s := s[0:i+2], s[i+2:]
+	g := posixGroup[name]
+	if g.sign == 0 {
+		return nil, "", &Error{ErrInvalidCharRange, name}
+	}
+	return p.appendGroup(r, g), s, nil
+}
+
+func (p *parser) appendGroup(r []rune, g charGroup) []rune {
+	if p.flags&FoldCase == 0 {
+		if g.sign < 0 {
+			r = appendNegatedClass(r, g.class)
+		} else {
+			r = appendClass(r, g.class)
+		}
+	} else {
+		tmp := p.tmpClass[:0]
+		tmp = appendFoldedClass(tmp, g.class)
+		p.tmpClass = tmp
+		tmp = cleanClass(&p.tmpClass)
+		if g.sign < 0 {
+			r = appendNegatedClass(r, tmp)
+		} else {
+			r = appendClass(r, tmp)
+		}
+	}
+	return r
+}
+
+var anyTable = &unicode.RangeTable{
+	R16: []unicode.Range16{{Lo: 0, Hi: 1<<16 - 1, Stride: 1}},
+	R32: []unicode.Range32{{Lo: 1 << 16, Hi: unicode.MaxRune, Stride: 1}},
+}
+
+// unicodeTable returns the unicode.RangeTable identified by name
+// and the table of additional fold-equivalent code points.
+func unicodeTable(name string) (*unicode.RangeTable, *unicode.RangeTable) {
+	// Special case: "Any" means any.
+	if name == "Any" {
+		return anyTable, anyTable
+	}
+	if t := unicode.Categories[name]; t != nil {
+		return t, unicode.FoldCategory[name]
+	}
+	if t := unicode.Scripts[name]; t != nil {
+		return t, unicode.FoldScript[name]
+	}
+	return nil, nil
+}
+
+// parseUnicodeClass parses a leading Unicode character class like \p{Han}
+// from the beginning of s. If one is present, it appends the characters to r
+// and returns the new slice r and the remainder of the string.
+func (p *parser) parseUnicodeClass(s string, r []rune) (out []rune, rest string, err error) {
+	if p.flags&UnicodeGroups == 0 || len(s) < 2 || s[0] != '\\' || s[1] != 'p' && s[1] != 'P' {
+		return
+	}
+
+	// Committed to parse or return error.
+	sign := +1
+	if s[1] == 'P' {
+		sign = -1
+	}
+	t := s[2:]
+	c, t, err := nextRune(t)
+	if err != nil {
+		return
+	}
+	var seq, name string
+	if c != '{' {
+		// Single-letter name.
+		seq = s[:len(s)-len(t)]
+		name = seq[2:]
+	} else {
+		// Name is in braces.
+		end := strings.IndexRune(s, '}')
+		if end < 0 {
+			if err = checkUTF8(s); err != nil {
+				return
+			}
+			return nil, "", &Error{ErrInvalidCharRange, s}
+		}
+		seq, t = s[:end+1], s[end+1:]
+		name = s[3:end]
+		if err = checkUTF8(name); err != nil {
+			return
+		}
+	}
+
+	// Group can have leading negation too.  \p{^Han} == \P{Han}, \P{^Han} == \p{Han}.
+	if name != "" && name[0] == '^' {
+		sign = -sign
+		name = name[1:]
+	}
+
+	tab, fold := unicodeTable(name)
+	if tab == nil {
+		return nil, "", &Error{ErrInvalidCharRange, seq}
+	}
+
+	if p.flags&FoldCase == 0 || fold == nil {
+		if sign > 0 {
+			r = appendTable(r, tab)
+		} else {
+			r = appendNegatedTable(r, tab)
+		}
+	} else {
+		// Merge and clean tab and fold in a temporary buffer.
+		// This is necessary for the negative case and just tidy
+		// for the positive case.
+		tmp := p.tmpClass[:0]
+		tmp = appendTable(tmp, tab)
+		tmp = appendTable(tmp, fold)
+		p.tmpClass = tmp
+		tmp = cleanClass(&p.tmpClass)
+		if sign > 0 {
+			r = appendClass(r, tmp)
+		} else {
+			r = appendNegatedClass(r, tmp)
+		}
+	}
+	return r, t, nil
+}
+
+// parseClass parses a character class at the beginning of s
+// and pushes it onto the parse stack.
+func (p *parser) parseClass(s string) (rest string, err error) {
+	t := s[1:] // chop [
+	re := p.newRegexp(OpCharClass)
+	re.Flags = p.flags
+	re.Rune = re.Rune0[:0]
+
+	sign := +1
+	if t != "" && t[0] == '^' {
+		sign = -1
+		t = t[1:]
+
+		// If character class does not match \n, add it here,
+		// so that negation later will do the right thing.
+		if p.flags&ClassNL == 0 {
+			re.Rune = append(re.Rune, '\n', '\n')
+		}
+	}
+
+	class := re.Rune
+	first := true // ] and - are okay as first char in class
+	for t == "" || t[0] != ']' || first {
+		// POSIX: - is only okay unescaped as first or last in class.
+		// Perl: - is okay anywhere.
+		if t != "" && t[0] == '-' && p.flags&PerlX == 0 && !first && (len(t) == 1 || t[1] != ']') {
+			_, size := utf8.DecodeRuneInString(t[1:])
+			return "", &Error{Code: ErrInvalidCharRange, Expr: t[:1+size]}
+		}
+		first = false
+
+		// Look for POSIX [:alnum:] etc.
+		if len(t) > 2 && t[0] == '[' && t[1] == ':' {
+			nclass, nt, err := p.parseNamedClass(t, class)
+			if err != nil {
+				return "", err
+			}
+			if nclass != nil {
+				class, t = nclass, nt
+				continue
+			}
+		}
+
+		// Look for Unicode character group like \p{Han}.
+		nclass, nt, err := p.parseUnicodeClass(t, class)
+		if err != nil {
+			return "", err
+		}
+		if nclass != nil {
+			class, t = nclass, nt
+			continue
+		}
+
+		// Look for Perl character class symbols (extension).
+		if nclass, nt := p.parsePerlClassEscape(t, class); nclass != nil {
+			class, t = nclass, nt
+			continue
+		}
+
+		// Single character or simple range.
+		rng := t
+		var lo, hi rune
+		if lo, t, err = p.parseClassChar(t, s); err != nil {
+			return "", err
+		}
+		hi = lo
+		// [a-] means (a|-) so check for final ].
+		if len(t) >= 2 && t[0] == '-' && t[1] != ']' {
+			t = t[1:]
+			if hi, t, err = p.parseClassChar(t, s); err != nil {
+				return "", err
+			}
+			if hi < lo {
+				rng = rng[:len(rng)-len(t)]
+				return "", &Error{Code: ErrInvalidCharRange, Expr: rng}
+			}
+		}
+		if p.flags&FoldCase == 0 {
+			class = appendRange(class, lo, hi)
+		} else {
+			class = appendFoldedRange(class, lo, hi)
+		}
+	}
+	t = t[1:] // chop ]
+
+	// Use &re.Rune instead of &class to avoid allocation.
+	re.Rune = class
+	class = cleanClass(&re.Rune)
+	if sign < 0 {
+		class = negateClass(class)
+	}
+	re.Rune = class
+	p.push(re)
+	return t, nil
+}
+
+// cleanClass sorts the ranges (pairs of elements of r),
+// merges them, and eliminates duplicates.
+func cleanClass(rp *[]rune) []rune {
+
+	// Sort by lo increasing, hi decreasing to break ties.
+	sort.Sort(ranges{rp})
+
+	r := *rp
+	if len(r) < 2 {
+		return r
+	}
+
+	// Merge abutting, overlapping.
+	w := 2 // write index
+	for i := 2; i < len(r); i += 2 {
+		lo, hi := r[i], r[i+1]
+		if lo <= r[w-1]+1 {
+			// merge with previous range
+			if hi > r[w-1] {
+				r[w-1] = hi
+			}
+			continue
+		}
+		// new disjoint range
+		r[w] = lo
+		r[w+1] = hi
+		w += 2
+	}
+
+	return r[:w]
+}
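+
+// For example, the unsorted, overlapping input
+//
+//	r := []rune{'c', 'f', 'a', 'z', '0', '9'}
+//	r = cleanClass(&r)
+//
+// sorts the pairs and merges c-f into a-z, leaving []rune{'0', '9', 'a', 'z'}.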
+
+// inCharClass reports whether r is in the class.
+// It assumes the class has been cleaned by cleanClass.
+func inCharClass(r rune, class []rune) bool {
+	_, ok := sort.Find(len(class)/2, func(i int) int {
+		lo, hi := class[2*i], class[2*i+1]
+		if r > hi {
+			return +1
+		}
+		if r < lo {
+			return -1
+		}
+		return 0
+	})
+	return ok
+}
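+
+// For example, with class = []rune{'0', '9', 'a', 'z'},
+// inCharClass('q', class) reports true, while inCharClass('_', class)
+// reports false because '_' (U+005F) falls between the two ranges.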
+
+// appendLiteral returns the result of appending the literal x to the class r.
+func appendLiteral(r []rune, x rune, flags Flags) []rune {
+	if flags&FoldCase != 0 {
+		return appendFoldedRange(r, x, x)
+	}
+	return appendRange(r, x, x)
+}
+
+// appendRange returns the result of appending the range lo-hi to the class r.
+func appendRange(r []rune, lo, hi rune) []rune {
+	// Expand last range or next to last range if it overlaps or abuts.
+	// Checking two ranges helps when appending case-folded
+	// alphabets, so that one range can be expanding A-Z and the
+	// other expanding a-z.
+	n := len(r)
+	for i := 2; i <= 4; i += 2 { // twice, using i=2, i=4
+		if n >= i {
+			rlo, rhi := r[n-i], r[n-i+1]
+			if lo <= rhi+1 && rlo <= hi+1 {
+				if lo < rlo {
+					r[n-i] = lo
+				}
+				if hi > rhi {
+					r[n-i+1] = hi
+				}
+				return r
+			}
+		}
+	}
+
+	return append(r, lo, hi)
+}
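+
+// For example, appendRange([]rune{'a', 'c'}, 'd', 'f') extends the last
+// range in place and returns []rune{'a', 'f'}; appending the disjoint
+// range 'x'-'z' afterwards grows the slice to []rune{'a', 'f', 'x', 'z'}.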
+
+const (
+	// minimum and maximum runes involved in folding.
+	// checked during test.
+	minFold = 0x0041
+	maxFold = 0x1e943
+)
+
+// appendFoldedRange returns the result of appending the range lo-hi
+// and its case folding-equivalent runes to the class r.
+func appendFoldedRange(r []rune, lo, hi rune) []rune {
+	// Optimizations.
+	if lo <= minFold && hi >= maxFold {
+		// Range is full: folding can't add more.
+		return appendRange(r, lo, hi)
+	}
+	if hi < minFold || lo > maxFold {
+		// Range is outside folding possibilities.
+		return appendRange(r, lo, hi)
+	}
+	if lo < minFold {
+		// [lo, minFold-1] needs no folding.
+		r = appendRange(r, lo, minFold-1)
+		lo = minFold
+	}
+	if hi > maxFold {
+		// [maxFold+1, hi] needs no folding.
+		r = appendRange(r, maxFold+1, hi)
+		hi = maxFold
+	}
+
+	// Brute force. Depend on appendRange to coalesce ranges on the fly.
+	for c := lo; c <= hi; c++ {
+		r = appendRange(r, c, c)
+		f := unicode.SimpleFold(c)
+		for f != c {
+			r = appendRange(r, f, f)
+			f = unicode.SimpleFold(f)
+		}
+	}
+	return r
+}
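+
+// For example, appendFoldedRange(nil, 'k', 'k') appends 'k' and then its
+// fold-equivalents KELVIN SIGN (U+212A) and 'K' as single-rune ranges;
+// a later cleanClass pass sorts and merges the result.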
+
+// appendClass returns the result of appending the class x to the class r.
+// It assumes x is clean.
+func appendClass(r []rune, x []rune) []rune {
+	for i := 0; i < len(x); i += 2 {
+		r = appendRange(r, x[i], x[i+1])
+	}
+	return r
+}
+
+// appendFoldedClass returns the result of appending the case folding of the class x to the class r.
+func appendFoldedClass(r []rune, x []rune) []rune {
+	for i := 0; i < len(x); i += 2 {
+		r = appendFoldedRange(r, x[i], x[i+1])
+	}
+	return r
+}
+
+// appendNegatedClass returns the result of appending the negation of the class x to the class r.
+// It assumes x is clean.
+func appendNegatedClass(r []rune, x []rune) []rune {
+	nextLo := '\u0000'
+	for i := 0; i < len(x); i += 2 {
+		lo, hi := x[i], x[i+1]
+		if nextLo <= lo-1 {
+			r = appendRange(r, nextLo, lo-1)
+		}
+		nextLo = hi + 1
+	}
+	if nextLo <= unicode.MaxRune {
+		r = appendRange(r, nextLo, unicode.MaxRune)
+	}
+	return r
+}
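+
+// For example, appendNegatedClass(nil, []rune{'a', 'z'}) returns
+// []rune{0x00, 'a' - 1, 'z' + 1, unicode.MaxRune}, that is,
+// every rune outside a-z.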
+
+// appendTable returns the result of appending x to the class r.
+func appendTable(r []rune, x *unicode.RangeTable) []rune {
+	for _, xr := range x.R16 {
+		lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride)
+		if stride == 1 {
+			r = appendRange(r, lo, hi)
+			continue
+		}
+		for c := lo; c <= hi; c += stride {
+			r = appendRange(r, c, c)
+		}
+	}
+	for _, xr := range x.R32 {
+		lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride)
+		if stride == 1 {
+			r = appendRange(r, lo, hi)
+			continue
+		}
+		for c := lo; c <= hi; c += stride {
+			r = appendRange(r, c, c)
+		}
+	}
+	return r
+}
+
+// appendNegatedTable returns the result of appending the negation of x to the class r.
+func appendNegatedTable(r []rune, x *unicode.RangeTable) []rune {
+	nextLo := '\u0000' // lo end of next class to add
+	for _, xr := range x.R16 {
+		lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride)
+		if stride == 1 {
+			if nextLo <= lo-1 {
+				r = appendRange(r, nextLo, lo-1)
+			}
+			nextLo = hi + 1
+			continue
+		}
+		for c := lo; c <= hi; c += stride {
+			if nextLo <= c-1 {
+				r = appendRange(r, nextLo, c-1)
+			}
+			nextLo = c + 1
+		}
+	}
+	for _, xr := range x.R32 {
+		lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride)
+		if stride == 1 {
+			if nextLo <= lo-1 {
+				r = appendRange(r, nextLo, lo-1)
+			}
+			nextLo = hi + 1
+			continue
+		}
+		for c := lo; c <= hi; c += stride {
+			if nextLo <= c-1 {
+				r = appendRange(r, nextLo, c-1)
+			}
+			nextLo = c + 1
+		}
+	}
+	if nextLo <= unicode.MaxRune {
+		r = appendRange(r, nextLo, unicode.MaxRune)
+	}
+	return r
+}
+
+// negateClass overwrites r and returns r's negation.
+// It assumes the class r is already clean.
+func negateClass(r []rune) []rune {
+	nextLo := '\u0000' // lo end of next class to add
+	w := 0             // write index
+	for i := 0; i < len(r); i += 2 {
+		lo, hi := r[i], r[i+1]
+		if nextLo <= lo-1 {
+			r[w] = nextLo
+			r[w+1] = lo - 1
+			w += 2
+		}
+		nextLo = hi + 1
+	}
+	r = r[:w]
+	if nextLo <= unicode.MaxRune {
+		// It's possible for the negation to have one more
+		// range - this one - than the original class, so use append.
+		r = append(r, nextLo, unicode.MaxRune)
+	}
+	return r
+}
+
+// ranges implements sort.Interface on a []rune.
+// The choice of receiver type definition is strange
+// but avoids an allocation since we already have
+// a *[]rune.
+type ranges struct {
+	p *[]rune
+}
+
+func (ra ranges) Less(i, j int) bool {
+	p := *ra.p
+	i *= 2
+	j *= 2
+	return p[i] < p[j] || p[i] == p[j] && p[i+1] > p[j+1]
+}
+
+func (ra ranges) Len() int {
+	return len(*ra.p) / 2
+}
+
+func (ra ranges) Swap(i, j int) {
+	p := *ra.p
+	i *= 2
+	j *= 2
+	p[i], p[i+1], p[j], p[j+1] = p[j], p[j+1], p[i], p[i+1]
+}
+
+func checkUTF8(s string) error {
+	for s != "" {
+		rune, size := utf8.DecodeRuneInString(s)
+		if rune == utf8.RuneError && size == 1 {
+			return &Error{Code: ErrInvalidUTF8, Expr: s}
+		}
+		s = s[size:]
+	}
+	return nil
+}
+
+func nextRune(s string) (c rune, t string, err error) {
+	c, size := utf8.DecodeRuneInString(s)
+	if c == utf8.RuneError && size == 1 {
+		return 0, "", &Error{Code: ErrInvalidUTF8, Expr: s}
+	}
+	return c, s[size:], nil
+}
+
+func isalnum(c rune) bool {
+	return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+}
+
+func unhex(c rune) rune {
+	if '0' <= c && c <= '9' {
+		return c - '0'
+	}
+	if 'a' <= c && c <= 'f' {
+		return c - 'a' + 10
+	}
+	if 'A' <= c && c <= 'F' {
+		return c - 'A' + 10
+	}
+	return -1
+}
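
appendFoldedRange's brute-force loop leans on unicode.SimpleFold, which steps
through the fold orbit of a rune and eventually wraps back to the starting
point. A minimal sketch of that orbit walk, using only the standard library:

	package main

	import (
		"fmt"
		"unicode"
	)

	func main() {
		// Walk the simple-fold orbit of 'k' the same way the loop in
		// appendFoldedRange does, stopping once it wraps back to 'k'.
		// Prints U+212A (KELVIN SIGN) and U+004B ('K').
		for f := unicode.SimpleFold('k'); f != 'k'; f = unicode.SimpleFold(f) {
			fmt.Printf("%#U\n", f)
		}
	}
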
diff --git a/vendor/github.com/grafana/regexp/syntax/perl_groups.go b/vendor/github.com/grafana/regexp/syntax/perl_groups.go
new file mode 100644
index 0000000000000000000000000000000000000000..effe4e6862754899ebeeabe5bedf97b46b06f120
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/perl_groups.go
@@ -0,0 +1,134 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GENERATED BY make_perl_groups.pl; DO NOT EDIT.
+// make_perl_groups.pl >perl_groups.go
+
+package syntax
+
+var code1 = []rune{ /* \d */
+	0x30, 0x39,
+}
+
+var code2 = []rune{ /* \s */
+	0x9, 0xa,
+	0xc, 0xd,
+	0x20, 0x20,
+}
+
+var code3 = []rune{ /* \w */
+	0x30, 0x39,
+	0x41, 0x5a,
+	0x5f, 0x5f,
+	0x61, 0x7a,
+}
+
+var perlGroup = map[string]charGroup{
+	`\d`: {+1, code1},
+	`\D`: {-1, code1},
+	`\s`: {+1, code2},
+	`\S`: {-1, code2},
+	`\w`: {+1, code3},
+	`\W`: {-1, code3},
+}
+var code4 = []rune{ /* [:alnum:] */
+	0x30, 0x39,
+	0x41, 0x5a,
+	0x61, 0x7a,
+}
+
+var code5 = []rune{ /* [:alpha:] */
+	0x41, 0x5a,
+	0x61, 0x7a,
+}
+
+var code6 = []rune{ /* [:ascii:] */
+	0x0, 0x7f,
+}
+
+var code7 = []rune{ /* [:blank:] */
+	0x9, 0x9,
+	0x20, 0x20,
+}
+
+var code8 = []rune{ /* [:cntrl:] */
+	0x0, 0x1f,
+	0x7f, 0x7f,
+}
+
+var code9 = []rune{ /* [:digit:] */
+	0x30, 0x39,
+}
+
+var code10 = []rune{ /* [:graph:] */
+	0x21, 0x7e,
+}
+
+var code11 = []rune{ /* [:lower:] */
+	0x61, 0x7a,
+}
+
+var code12 = []rune{ /* [:print:] */
+	0x20, 0x7e,
+}
+
+var code13 = []rune{ /* [:punct:] */
+	0x21, 0x2f,
+	0x3a, 0x40,
+	0x5b, 0x60,
+	0x7b, 0x7e,
+}
+
+var code14 = []rune{ /* [:space:] */
+	0x9, 0xd,
+	0x20, 0x20,
+}
+
+var code15 = []rune{ /* [:upper:] */
+	0x41, 0x5a,
+}
+
+var code16 = []rune{ /* [:word:] */
+	0x30, 0x39,
+	0x41, 0x5a,
+	0x5f, 0x5f,
+	0x61, 0x7a,
+}
+
+var code17 = []rune{ /* [:xdigit:] */
+	0x30, 0x39,
+	0x41, 0x46,
+	0x61, 0x66,
+}
+
+var posixGroup = map[string]charGroup{
+	`[:alnum:]`:   {+1, code4},
+	`[:^alnum:]`:  {-1, code4},
+	`[:alpha:]`:   {+1, code5},
+	`[:^alpha:]`:  {-1, code5},
+	`[:ascii:]`:   {+1, code6},
+	`[:^ascii:]`:  {-1, code6},
+	`[:blank:]`:   {+1, code7},
+	`[:^blank:]`:  {-1, code7},
+	`[:cntrl:]`:   {+1, code8},
+	`[:^cntrl:]`:  {-1, code8},
+	`[:digit:]`:   {+1, code9},
+	`[:^digit:]`:  {-1, code9},
+	`[:graph:]`:   {+1, code10},
+	`[:^graph:]`:  {-1, code10},
+	`[:lower:]`:   {+1, code11},
+	`[:^lower:]`:  {-1, code11},
+	`[:print:]`:   {+1, code12},
+	`[:^print:]`:  {-1, code12},
+	`[:punct:]`:   {+1, code13},
+	`[:^punct:]`:  {-1, code13},
+	`[:space:]`:   {+1, code14},
+	`[:^space:]`:  {-1, code14},
+	`[:upper:]`:   {+1, code15},
+	`[:^upper:]`:  {-1, code15},
+	`[:word:]`:    {+1, code16},
+	`[:^word:]`:   {-1, code16},
+	`[:xdigit:]`:  {+1, code17},
+	`[:^xdigit:]`: {-1, code17},
+}
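
Each charGroup entry pairs a sign with one of the tables above: +1 applies the
class as-is, -1 routes it through appendNegatedClass. A quick way to see a
generated table surface in a parse tree, sketched with the standard library's
regexp/syntax (this fork mirrors its API):

	package main

	import (
		"fmt"
		"regexp/syntax"
	)

	func main() {
		re, err := syntax.Parse(`\d`, syntax.Perl)
		if err != nil {
			panic(err)
		}
		// A char class holding the 0x30-0x39 pair from code1.
		fmt.Println(re.Op, re.Rune) // CharClass [48 57]
	}
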
diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go
new file mode 100644
index 0000000000000000000000000000000000000000..6a3705ec8f9090ac0f68b8773c6293459430bd22
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/prog.go
@@ -0,0 +1,349 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Compiled program.
+// May not belong in this package, but convenient for now.
+
+// A Prog is a compiled regular expression program.
+type Prog struct {
+	Inst   []Inst
+	Start  int // index of start instruction
+	NumCap int // number of InstCapture insts in re
+}
+
+// An InstOp is an instruction opcode.
+type InstOp uint8
+
+const (
+	InstAlt InstOp = iota
+	InstAltMatch
+	InstCapture
+	InstEmptyWidth
+	InstMatch
+	InstFail
+	InstNop
+	InstRune
+	InstRune1
+	InstRuneAny
+	InstRuneAnyNotNL
+)
+
+var instOpNames = []string{
+	"InstAlt",
+	"InstAltMatch",
+	"InstCapture",
+	"InstEmptyWidth",
+	"InstMatch",
+	"InstFail",
+	"InstNop",
+	"InstRune",
+	"InstRune1",
+	"InstRuneAny",
+	"InstRuneAnyNotNL",
+}
+
+func (i InstOp) String() string {
+	if uint(i) >= uint(len(instOpNames)) {
+		return ""
+	}
+	return instOpNames[i]
+}
+
+// An EmptyOp specifies a kind or mixture of zero-width assertions.
+type EmptyOp uint8
+
+const (
+	EmptyBeginLine EmptyOp = 1 << iota
+	EmptyEndLine
+	EmptyBeginText
+	EmptyEndText
+	EmptyWordBoundary
+	EmptyNoWordBoundary
+)
+
+// EmptyOpContext returns the zero-width assertions
+// satisfied at the position between the runes r1 and r2.
+// Passing r1 == -1 indicates that the position is
+// at the beginning of the text.
+// Passing r2 == -1 indicates that the position is
+// at the end of the text.
+func EmptyOpContext(r1, r2 rune) EmptyOp {
+	var op EmptyOp = EmptyNoWordBoundary
+	var boundary byte
+	switch {
+	case IsWordChar(r1):
+		boundary = 1
+	case r1 == '\n':
+		op |= EmptyBeginLine
+	case r1 < 0:
+		op |= EmptyBeginText | EmptyBeginLine
+	}
+	switch {
+	case IsWordChar(r2):
+		boundary ^= 1
+	case r2 == '\n':
+		op |= EmptyEndLine
+	case r2 < 0:
+		op |= EmptyEndText | EmptyEndLine
+	}
+	if boundary != 0 { // IsWordChar(r1) != IsWordChar(r2)
+		op ^= (EmptyWordBoundary | EmptyNoWordBoundary)
+	}
+	return op
+}
+
+// IsWordChar reports whether r is considered a “word character”
+// during the evaluation of the \b and \B zero-width assertions.
+// These assertions are ASCII-only: the word characters are [A-Za-z0-9_].
+func IsWordChar(r rune) bool {
+	// Test for lowercase letters first, as these occur more
+	// frequently than uppercase letters in common cases.
+	return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_'
+}
+
+// An Inst is a single instruction in a regular expression program.
+type Inst struct {
+	Op   InstOp
+	Out  uint32 // all but InstMatch, InstFail
+	Arg  uint32 // InstAlt, InstAltMatch, InstCapture, InstEmptyWidth
+	Rune []rune
+}
+
+func (p *Prog) String() string {
+	var b strings.Builder
+	dumpProg(&b, p)
+	return b.String()
+}
+
+// skipNop follows any no-op or capturing instructions.
+func (p *Prog) skipNop(pc uint32) *Inst {
+	i := &p.Inst[pc]
+	for i.Op == InstNop || i.Op == InstCapture {
+		i = &p.Inst[i.Out]
+	}
+	return i
+}
+
+// op returns i.Op but merges all the Rune special cases into InstRune
+func (i *Inst) op() InstOp {
+	op := i.Op
+	switch op {
+	case InstRune1, InstRuneAny, InstRuneAnyNotNL:
+		op = InstRune
+	}
+	return op
+}
+
+// Prefix returns a literal string that all matches for the
+// regexp must start with. Complete is true if the prefix
+// is the entire match.
+func (p *Prog) Prefix() (prefix string, complete bool) {
+	i := p.skipNop(uint32(p.Start))
+
+	// Avoid allocation of buffer if prefix is empty.
+	if i.op() != InstRune || len(i.Rune) != 1 {
+		return "", i.Op == InstMatch
+	}
+
+	// Have prefix; gather characters.
+	var buf strings.Builder
+	for i.op() == InstRune && len(i.Rune) == 1 && Flags(i.Arg)&FoldCase == 0 && i.Rune[0] != utf8.RuneError {
+		buf.WriteRune(i.Rune[0])
+		i = p.skipNop(i.Out)
+	}
+	return buf.String(), i.Op == InstMatch
+}
+
+// StartCond returns the leading empty-width conditions that must
+// be true in any match. It returns ^EmptyOp(0) if no matches are possible.
+func (p *Prog) StartCond() EmptyOp {
+	var flag EmptyOp
+	pc := uint32(p.Start)
+	i := &p.Inst[pc]
+Loop:
+	for {
+		switch i.Op {
+		case InstEmptyWidth:
+			flag |= EmptyOp(i.Arg)
+		case InstFail:
+			return ^EmptyOp(0)
+		case InstCapture, InstNop:
+			// skip
+		default:
+			break Loop
+		}
+		pc = i.Out
+		i = &p.Inst[pc]
+	}
+	return flag
+}
+
+const noMatch = -1
+
+// MatchRune reports whether the instruction matches (and consumes) r.
+// It should only be called when i.Op == [InstRune].
+func (i *Inst) MatchRune(r rune) bool {
+	return i.MatchRunePos(r) != noMatch
+}
+
+// MatchRunePos checks whether the instruction matches (and consumes) r.
+// If so, MatchRunePos returns the index of the matching rune pair
+// (or, when len(i.Rune) == 1, rune singleton).
+// If not, MatchRunePos returns -1.
+// MatchRunePos should only be called when i.Op == [InstRune].
+func (i *Inst) MatchRunePos(r rune) int {
+	rune := i.Rune
+
+	switch len(rune) {
+	case 0:
+		return noMatch
+
+	case 1:
+		// Special case: single-rune slice is from literal string, not char class.
+		r0 := rune[0]
+		if r == r0 {
+			return 0
+		}
+		if Flags(i.Arg)&FoldCase != 0 {
+			for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
+				if r == r1 {
+					return 0
+				}
+			}
+		}
+		return noMatch
+
+	case 2:
+		if r >= rune[0] && r <= rune[1] {
+			return 0
+		}
+		return noMatch
+
+	case 4, 6, 8:
+		// Linear search for a few pairs.
+		// Should handle ASCII well.
+		for j := 0; j < len(rune); j += 2 {
+			if r < rune[j] {
+				return noMatch
+			}
+			if r <= rune[j+1] {
+				return j / 2
+			}
+		}
+		return noMatch
+	}
+
+	// Otherwise binary search.
+	lo := 0
+	hi := len(rune) / 2
+	for lo < hi {
+		m := int(uint(lo+hi) >> 1)
+		if c := rune[2*m]; c <= r {
+			if r <= rune[2*m+1] {
+				return m
+			}
+			lo = m + 1
+		} else {
+			hi = m
+		}
+	}
+	return noMatch
+}
+
+// MatchEmptyWidth reports whether the instruction matches
+// an empty string between the runes before and after.
+// It should only be called when i.Op == [InstEmptyWidth].
+func (i *Inst) MatchEmptyWidth(before rune, after rune) bool {
+	switch EmptyOp(i.Arg) {
+	case EmptyBeginLine:
+		return before == '\n' || before == -1
+	case EmptyEndLine:
+		return after == '\n' || after == -1
+	case EmptyBeginText:
+		return before == -1
+	case EmptyEndText:
+		return after == -1
+	case EmptyWordBoundary:
+		return IsWordChar(before) != IsWordChar(after)
+	case EmptyNoWordBoundary:
+		return IsWordChar(before) == IsWordChar(after)
+	}
+	panic("unknown empty width arg")
+}
+
+func (i *Inst) String() string {
+	var b strings.Builder
+	dumpInst(&b, i)
+	return b.String()
+}
+
+func bw(b *strings.Builder, args ...string) {
+	for _, s := range args {
+		b.WriteString(s)
+	}
+}
+
+func dumpProg(b *strings.Builder, p *Prog) {
+	for j := range p.Inst {
+		i := &p.Inst[j]
+		pc := strconv.Itoa(j)
+		if len(pc) < 3 {
+			b.WriteString("   "[len(pc):])
+		}
+		if j == p.Start {
+			pc += "*"
+		}
+		bw(b, pc, "\t")
+		dumpInst(b, i)
+		bw(b, "\n")
+	}
+}
+
+func u32(i uint32) string {
+	return strconv.FormatUint(uint64(i), 10)
+}
+
+func dumpInst(b *strings.Builder, i *Inst) {
+	switch i.Op {
+	case InstAlt:
+		bw(b, "alt -> ", u32(i.Out), ", ", u32(i.Arg))
+	case InstAltMatch:
+		bw(b, "altmatch -> ", u32(i.Out), ", ", u32(i.Arg))
+	case InstCapture:
+		bw(b, "cap ", u32(i.Arg), " -> ", u32(i.Out))
+	case InstEmptyWidth:
+		bw(b, "empty ", u32(i.Arg), " -> ", u32(i.Out))
+	case InstMatch:
+		bw(b, "match")
+	case InstFail:
+		bw(b, "fail")
+	case InstNop:
+		bw(b, "nop -> ", u32(i.Out))
+	case InstRune:
+		if i.Rune == nil {
+			// shouldn't happen
+			bw(b, "rune <nil>")
+		}
+		bw(b, "rune ", strconv.QuoteToASCII(string(i.Rune)))
+		if Flags(i.Arg)&FoldCase != 0 {
+			bw(b, "/i")
+		}
+		bw(b, " -> ", u32(i.Out))
+	case InstRune1:
+		bw(b, "rune1 ", strconv.QuoteToASCII(string(i.Rune)), " -> ", u32(i.Out))
+	case InstRuneAny:
+		bw(b, "any -> ", u32(i.Out))
+	case InstRuneAnyNotNL:
+		bw(b, "anynotnl -> ", u32(i.Out))
+	}
+}
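
Prog, Prefix, and the dump helpers are easiest to see end to end by compiling
a small pattern. A sketch using the standard library's regexp/syntax, which
this fork mirrors:

	package main

	import (
		"fmt"
		"regexp/syntax"
	)

	func main() {
		re, err := syntax.Parse(`abc[0-9]+`, syntax.Perl)
		if err != nil {
			panic(err)
		}
		prog, err := syntax.Compile(re.Simplify())
		if err != nil {
			panic(err)
		}
		prefix, complete := prog.Prefix()
		fmt.Printf("prefix=%q complete=%v\n", prefix, complete) // prefix="abc" complete=false
		fmt.Print(prog) // dumpProg output: one instruction per line, start marked "*"
	}
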
diff --git a/vendor/github.com/grafana/regexp/syntax/regexp.go b/vendor/github.com/grafana/regexp/syntax/regexp.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ad3653abba3ccc36b4e38673a7048e583de2e0d
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/regexp.go
@@ -0,0 +1,464 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// Note to implementers:
+// In this package, re is always a *Regexp and r is always a rune.
+
+import (
+	"slices"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// A Regexp is a node in a regular expression syntax tree.
+type Regexp struct {
+	Op       Op // operator
+	Flags    Flags
+	Sub      []*Regexp  // subexpressions, if any
+	Sub0     [1]*Regexp // storage for short Sub
+	Rune     []rune     // matched runes, for OpLiteral, OpCharClass
+	Rune0    [2]rune    // storage for short Rune
+	Min, Max int        // min, max for OpRepeat
+	Cap      int        // capturing index, for OpCapture
+	Name     string     // capturing name, for OpCapture
+}
+
+//go:generate stringer -type Op -trimprefix Op
+
+// An Op is a single regular expression operator.
+type Op uint8
+
+// Operators are listed in precedence order, tightest binding to weakest.
+// Character class operators are listed simplest to most complex
+// (OpLiteral, OpCharClass, OpAnyCharNotNL, OpAnyChar).
+
+const (
+	OpNoMatch        Op = 1 + iota // matches no strings
+	OpEmptyMatch                   // matches empty string
+	OpLiteral                      // matches Runes sequence
+	OpCharClass                    // matches Runes interpreted as range pair list
+	OpAnyCharNotNL                 // matches any character except newline
+	OpAnyChar                      // matches any character
+	OpBeginLine                    // matches empty string at beginning of line
+	OpEndLine                      // matches empty string at end of line
+	OpBeginText                    // matches empty string at beginning of text
+	OpEndText                      // matches empty string at end of text
+	OpWordBoundary                 // matches word boundary `\b`
+	OpNoWordBoundary               // matches word non-boundary `\B`
+	OpCapture                      // capturing subexpression with index Cap, optional name Name
+	OpStar                         // matches Sub[0] zero or more times
+	OpPlus                         // matches Sub[0] one or more times
+	OpQuest                        // matches Sub[0] zero or one times
+	OpRepeat                       // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit)
+	OpConcat                       // matches concatenation of Subs
+	OpAlternate                    // matches alternation of Subs
+)
+
+const opPseudo Op = 128 // where pseudo-ops start
+
+// Equal reports whether x and y have identical structure.
+func (x *Regexp) Equal(y *Regexp) bool {
+	if x == nil || y == nil {
+		return x == y
+	}
+	if x.Op != y.Op {
+		return false
+	}
+	switch x.Op {
+	case OpEndText:
+		// The parse flags remember whether this is \z or \Z.
+		if x.Flags&WasDollar != y.Flags&WasDollar {
+			return false
+		}
+
+	case OpLiteral, OpCharClass:
+		return slices.Equal(x.Rune, y.Rune)
+
+	case OpAlternate, OpConcat:
+		return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) })
+
+	case OpStar, OpPlus, OpQuest:
+		if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) {
+			return false
+		}
+
+	case OpRepeat:
+		if x.Flags&NonGreedy != y.Flags&NonGreedy || x.Min != y.Min || x.Max != y.Max || !x.Sub[0].Equal(y.Sub[0]) {
+			return false
+		}
+
+	case OpCapture:
+		if x.Cap != y.Cap || x.Name != y.Name || !x.Sub[0].Equal(y.Sub[0]) {
+			return false
+		}
+	}
+	return true
+}
+
+// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp.
+type printFlags uint8
+
+const (
+	flagI    printFlags = 1 << iota // (?i:
+	flagM                           // (?m:
+	flagS                           // (?s:
+	flagOff                         // )
+	flagPrec                        // (?: )
+	negShift = 5                    // flagI<<negShift is (?-i:
+)
+
+// addSpan enables the flags f around start..last,
+// by setting flags[start] = f and flags[last] = flagOff.
+func addSpan(start, last *Regexp, f printFlags, flags *map[*Regexp]printFlags) {
+	if *flags == nil {
+		*flags = make(map[*Regexp]printFlags)
+	}
+	(*flags)[start] = f
+	(*flags)[last] |= flagOff // maybe start==last
+}
+
+// calcFlags calculates the flags to print around each subexpression in re,
+// storing that information in (*flags)[sub] for each affected subexpression.
+// The first time an entry needs to be written to *flags, calcFlags allocates the map.
+// calcFlags also calculates the flags that must be active or can't be active
+// around re and returns those flags.
+func calcFlags(re *Regexp, flags *map[*Regexp]printFlags) (must, cant printFlags) {
+	switch re.Op {
+	default:
+		return 0, 0
+
+	case OpLiteral:
+		// If literal is fold-sensitive, return (flagI, 0) or (0, flagI)
+		// according to whether (?i) is active.
+		// If literal is not fold-sensitive, return 0, 0.
+		for _, r := range re.Rune {
+			if minFold <= r && r <= maxFold && unicode.SimpleFold(r) != r {
+				if re.Flags&FoldCase != 0 {
+					return flagI, 0
+				} else {
+					return 0, flagI
+				}
+			}
+		}
+		return 0, 0
+
+	case OpCharClass:
+		// If literal is fold-sensitive, return 0, flagI - (?i) has been compiled out.
+		// If literal is not fold-sensitive, return 0, 0.
+		for i := 0; i < len(re.Rune); i += 2 {
+			lo := max(minFold, re.Rune[i])
+			hi := min(maxFold, re.Rune[i+1])
+			for r := lo; r <= hi; r++ {
+				for f := unicode.SimpleFold(r); f != r; f = unicode.SimpleFold(f) {
+					if !(lo <= f && f <= hi) && !inCharClass(f, re.Rune) {
+						return 0, flagI
+					}
+				}
+			}
+		}
+		return 0, 0
+
+	case OpAnyCharNotNL: // (?-s).
+		return 0, flagS
+
+	case OpAnyChar: // (?s).
+		return flagS, 0
+
+	case OpBeginLine, OpEndLine: // (?m)^ (?m)$
+		return flagM, 0
+
+	case OpEndText:
+		if re.Flags&WasDollar != 0 { // (?-m)$
+			return 0, flagM
+		}
+		return 0, 0
+
+	case OpCapture, OpStar, OpPlus, OpQuest, OpRepeat:
+		return calcFlags(re.Sub[0], flags)
+
+	case OpConcat, OpAlternate:
+		// Gather the must and cant for each subexpression.
+		// When we find a conflicting subexpression, insert the necessary
+		// flags around the previously identified span and start over.
+		var must, cant, allCant printFlags
+		start := 0
+		last := 0
+		did := false
+		for i, sub := range re.Sub {
+			subMust, subCant := calcFlags(sub, flags)
+			if must&subCant != 0 || subMust&cant != 0 {
+				if must != 0 {
+					addSpan(re.Sub[start], re.Sub[last], must, flags)
+				}
+				must = 0
+				cant = 0
+				start = i
+				did = true
+			}
+			must |= subMust
+			cant |= subCant
+			allCant |= subCant
+			if subMust != 0 {
+				last = i
+			}
+			if must == 0 && start == i {
+				start++
+			}
+		}
+		if !did {
+			// No conflicts: pass the accumulated must and cant upward.
+			return must, cant
+		}
+		if must != 0 {
+			// Conflicts found; need to finish final span.
+			addSpan(re.Sub[start], re.Sub[last], must, flags)
+		}
+		return 0, allCant
+	}
+}
+
+// writeRegexp writes the Perl syntax for the regular expression re to b.
+func writeRegexp(b *strings.Builder, re *Regexp, f printFlags, flags map[*Regexp]printFlags) {
+	f |= flags[re]
+	if f&flagPrec != 0 && f&^(flagOff|flagPrec) != 0 && f&flagOff != 0 {
+		// flagPrec is redundant with other flags being added and terminated
+		f &^= flagPrec
+	}
+	if f&^(flagOff|flagPrec) != 0 {
+		b.WriteString(`(?`)
+		if f&flagI != 0 {
+			b.WriteString(`i`)
+		}
+		if f&flagM != 0 {
+			b.WriteString(`m`)
+		}
+		if f&flagS != 0 {
+			b.WriteString(`s`)
+		}
+		if f&((flagM|flagS)<<negShift) != 0 {
+			b.WriteString(`-`)
+			if f&(flagM<<negShift) != 0 {
+				b.WriteString(`m`)
+			}
+			if f&(flagS<<negShift) != 0 {
+				b.WriteString(`s`)
+			}
+		}
+		b.WriteString(`:`)
+	}
+	if f&flagOff != 0 {
+		defer b.WriteString(`)`)
+	}
+	if f&flagPrec != 0 {
+		b.WriteString(`(?:`)
+		defer b.WriteString(`)`)
+	}
+
+	switch re.Op {
+	default:
+		b.WriteString("<invalid op" + strconv.Itoa(int(re.Op)) + ">")
+	case OpNoMatch:
+		b.WriteString(`[^\x00-\x{10FFFF}]`)
+	case OpEmptyMatch:
+		b.WriteString(`(?:)`)
+	case OpLiteral:
+		for _, r := range re.Rune {
+			escape(b, r, false)
+		}
+	case OpCharClass:
+		if len(re.Rune)%2 != 0 {
+			b.WriteString(`[invalid char class]`)
+			break
+		}
+		b.WriteRune('[')
+		if len(re.Rune) == 0 {
+			b.WriteString(`^\x00-\x{10FFFF}`)
+		} else if re.Rune[0] == 0 && re.Rune[len(re.Rune)-1] == unicode.MaxRune && len(re.Rune) > 2 {
+			// Contains 0 and MaxRune. Probably a negated class.
+			// Print the gaps.
+			b.WriteRune('^')
+			for i := 1; i < len(re.Rune)-1; i += 2 {
+				lo, hi := re.Rune[i]+1, re.Rune[i+1]-1
+				escape(b, lo, lo == '-')
+				if lo != hi {
+					if hi != lo+1 {
+						b.WriteRune('-')
+					}
+					escape(b, hi, hi == '-')
+				}
+			}
+		} else {
+			for i := 0; i < len(re.Rune); i += 2 {
+				lo, hi := re.Rune[i], re.Rune[i+1]
+				escape(b, lo, lo == '-')
+				if lo != hi {
+					if hi != lo+1 {
+						b.WriteRune('-')
+					}
+					escape(b, hi, hi == '-')
+				}
+			}
+		}
+		b.WriteRune(']')
+	case OpAnyCharNotNL, OpAnyChar:
+		b.WriteString(`.`)
+	case OpBeginLine:
+		b.WriteString(`^`)
+	case OpEndLine:
+		b.WriteString(`$`)
+	case OpBeginText:
+		b.WriteString(`\A`)
+	case OpEndText:
+		if re.Flags&WasDollar != 0 {
+			b.WriteString(`$`)
+		} else {
+			b.WriteString(`\z`)
+		}
+	case OpWordBoundary:
+		b.WriteString(`\b`)
+	case OpNoWordBoundary:
+		b.WriteString(`\B`)
+	case OpCapture:
+		if re.Name != "" {
+			b.WriteString(`(?P<`)
+			b.WriteString(re.Name)
+			b.WriteRune('>')
+		} else {
+			b.WriteRune('(')
+		}
+		if re.Sub[0].Op != OpEmptyMatch {
+			writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags)
+		}
+		b.WriteRune(')')
+	case OpStar, OpPlus, OpQuest, OpRepeat:
+		p := printFlags(0)
+		sub := re.Sub[0]
+		if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 {
+			p = flagPrec
+		}
+		writeRegexp(b, sub, p, flags)
+
+		switch re.Op {
+		case OpStar:
+			b.WriteRune('*')
+		case OpPlus:
+			b.WriteRune('+')
+		case OpQuest:
+			b.WriteRune('?')
+		case OpRepeat:
+			b.WriteRune('{')
+			b.WriteString(strconv.Itoa(re.Min))
+			if re.Max != re.Min {
+				b.WriteRune(',')
+				if re.Max >= 0 {
+					b.WriteString(strconv.Itoa(re.Max))
+				}
+			}
+			b.WriteRune('}')
+		}
+		if re.Flags&NonGreedy != 0 {
+			b.WriteRune('?')
+		}
+	case OpConcat:
+		for _, sub := range re.Sub {
+			p := printFlags(0)
+			if sub.Op == OpAlternate {
+				p = flagPrec
+			}
+			writeRegexp(b, sub, p, flags)
+		}
+	case OpAlternate:
+		for i, sub := range re.Sub {
+			if i > 0 {
+				b.WriteRune('|')
+			}
+			writeRegexp(b, sub, 0, flags)
+		}
+	}
+}
+
+func (re *Regexp) String() string {
+	var b strings.Builder
+	var flags map[*Regexp]printFlags
+	must, cant := calcFlags(re, &flags)
+	must |= (cant &^ flagI) << negShift
+	if must != 0 {
+		must |= flagOff
+	}
+	writeRegexp(&b, re, must, flags)
+	return b.String()
+}
+
+const meta = `\.+*?()|[]{}^$`
+
+func escape(b *strings.Builder, r rune, force bool) {
+	if unicode.IsPrint(r) {
+		if strings.ContainsRune(meta, r) || force {
+			b.WriteRune('\\')
+		}
+		b.WriteRune(r)
+		return
+	}
+
+	switch r {
+	case '\a':
+		b.WriteString(`\a`)
+	case '\f':
+		b.WriteString(`\f`)
+	case '\n':
+		b.WriteString(`\n`)
+	case '\r':
+		b.WriteString(`\r`)
+	case '\t':
+		b.WriteString(`\t`)
+	case '\v':
+		b.WriteString(`\v`)
+	default:
+		if r < 0x100 {
+			b.WriteString(`\x`)
+			s := strconv.FormatInt(int64(r), 16)
+			if len(s) == 1 {
+				b.WriteRune('0')
+			}
+			b.WriteString(s)
+			break
+		}
+		b.WriteString(`\x{`)
+		b.WriteString(strconv.FormatInt(int64(r), 16))
+		b.WriteString(`}`)
+	}
+}
+
+// MaxCap walks the regexp to find the maximum capture index.
+func (re *Regexp) MaxCap() int {
+	m := 0
+	if re.Op == OpCapture {
+		m = re.Cap
+	}
+	for _, sub := range re.Sub {
+		if n := sub.MaxCap(); m < n {
+			m = n
+		}
+	}
+	return m
+}
+
+// CapNames walks the regexp to find the names of capturing groups.
+func (re *Regexp) CapNames() []string {
+	names := make([]string, re.MaxCap()+1)
+	re.capNames(names)
+	return names
+}
+
+func (re *Regexp) capNames(names []string) {
+	if re.Op == OpCapture {
+		names[re.Cap] = re.Name
+	}
+	for _, sub := range re.Sub {
+		sub.capNames(names)
+	}
+}
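
calcFlags exists so String can re-print flags that parsing compiled away: a
case-insensitive literal is stored as folded runes plus FoldCase, and printing
must resurrect the (?i:...) wrapper around exactly the spans that need it. A
sketch of the round trip via the standard library's twin of this package:

	package main

	import (
		"fmt"
		"regexp/syntax"
	)

	func main() {
		re, err := syntax.Parse(`(?i)abc`, syntax.Perl)
		if err != nil {
			panic(err)
		}
		// The literal is stored case-folded; String re-derives the flag span.
		fmt.Println(re.String()) // expected: (?i:ABC)
	}
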
diff --git a/vendor/github.com/grafana/regexp/syntax/simplify.go b/vendor/github.com/grafana/regexp/syntax/simplify.go
new file mode 100644
index 0000000000000000000000000000000000000000..e439325139932c87a2a4367cb58586b1e2b3ebdf
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/simplify.go
@@ -0,0 +1,151 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// Simplify returns a regexp equivalent to re but without counted repetitions
+// and with various other simplifications, such as rewriting /(?:a+)+/ to /a+/.
+// The resulting regexp will execute correctly but its string representation
+// will not produce the same parse tree, because capturing parentheses
+// may have been duplicated or removed. For example, the simplified form
+// for /(x){1,2}/ is /(x)(x)?/ but both parentheses capture as $1.
+// The returned regexp may share structure with or be the original.
+func (re *Regexp) Simplify() *Regexp {
+	if re == nil {
+		return nil
+	}
+	switch re.Op {
+	case OpCapture, OpConcat, OpAlternate:
+		// Simplify children, building new Regexp if children change.
+		nre := re
+		for i, sub := range re.Sub {
+			nsub := sub.Simplify()
+			if nre == re && nsub != sub {
+				// Start a copy.
+				nre = new(Regexp)
+				*nre = *re
+				nre.Rune = nil
+				nre.Sub = append(nre.Sub0[:0], re.Sub[:i]...)
+			}
+			if nre != re {
+				nre.Sub = append(nre.Sub, nsub)
+			}
+		}
+		return nre
+
+	case OpStar, OpPlus, OpQuest:
+		sub := re.Sub[0].Simplify()
+		return simplify1(re.Op, re.Flags, sub, re)
+
+	case OpRepeat:
+		// Special special case: x{0} matches the empty string
+		// and doesn't even need to consider x.
+		if re.Min == 0 && re.Max == 0 {
+			return &Regexp{Op: OpEmptyMatch}
+		}
+
+		// The fun begins.
+		sub := re.Sub[0].Simplify()
+
+		// x{n,} means at least n matches of x.
+		if re.Max == -1 {
+			// Special case: x{0,} is x*.
+			if re.Min == 0 {
+				return simplify1(OpStar, re.Flags, sub, nil)
+			}
+
+			// Special case: x{1,} is x+.
+			if re.Min == 1 {
+				return simplify1(OpPlus, re.Flags, sub, nil)
+			}
+
+			// General case: x{4,} is xxxx+.
+			nre := &Regexp{Op: OpConcat}
+			nre.Sub = nre.Sub0[:0]
+			for i := 0; i < re.Min-1; i++ {
+				nre.Sub = append(nre.Sub, sub)
+			}
+			nre.Sub = append(nre.Sub, simplify1(OpPlus, re.Flags, sub, nil))
+			return nre
+		}
+
+		// Special case x{0} handled above.
+
+		// Special case: x{1} is just x.
+		if re.Min == 1 && re.Max == 1 {
+			return sub
+		}
+
+		// General case: x{n,m} means n copies of x and m copies of x?
+		// The machine will do less work if we nest the final m copies,
+		// so that x{2,5} = xx(x(x(x)?)?)?
+
+		// Build leading prefix: xx.
+		var prefix *Regexp
+		if re.Min > 0 {
+			prefix = &Regexp{Op: OpConcat}
+			prefix.Sub = prefix.Sub0[:0]
+			for i := 0; i < re.Min; i++ {
+				prefix.Sub = append(prefix.Sub, sub)
+			}
+		}
+
+		// Build and attach suffix: (x(x(x)?)?)?
+		if re.Max > re.Min {
+			suffix := simplify1(OpQuest, re.Flags, sub, nil)
+			for i := re.Min + 1; i < re.Max; i++ {
+				nre2 := &Regexp{Op: OpConcat}
+				nre2.Sub = append(nre2.Sub0[:0], sub, suffix)
+				suffix = simplify1(OpQuest, re.Flags, nre2, nil)
+			}
+			if prefix == nil {
+				return suffix
+			}
+			prefix.Sub = append(prefix.Sub, suffix)
+		}
+		if prefix != nil {
+			return prefix
+		}
+
+		// Some degenerate case like min > max or min < max < 0.
+		// Handle as impossible match.
+		return &Regexp{Op: OpNoMatch}
+	}
+
+	return re
+}
+
+// simplify1 implements Simplify for the unary OpStar,
+// OpPlus, and OpQuest operators. It returns the simple regexp
+// equivalent to
+//
+//	Regexp{Op: op, Flags: flags, Sub: {sub}}
+//
+// under the assumption that sub is already simple, and
+// without first allocating that structure. If the regexp
+// to be returned turns out to be equivalent to re, simplify1
+// returns re instead.
+//
+// simplify1 is factored out of Simplify because the implementation
+// for other operators generates these unary expressions.
+// Letting them call simplify1 makes sure the expressions they
+// generate are simple.
+func simplify1(op Op, flags Flags, sub, re *Regexp) *Regexp {
+	// Special case: repeat the empty string as much as
+	// you want, but it's still the empty string.
+	if sub.Op == OpEmptyMatch {
+		return sub
+	}
+	// The operators are idempotent if the flags match.
+	if op == sub.Op && flags&NonGreedy == sub.Flags&NonGreedy {
+		return sub
+	}
+	if re != nil && re.Op == op && re.Flags&NonGreedy == flags&NonGreedy && sub == re.Sub[0] {
+		return re
+	}
+
+	re = &Regexp{Op: op, Flags: flags}
+	re.Sub = append(re.Sub0[:0], sub)
+	return re
+}
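
The nesting strategy in the x{n,m} branch is easy to check by printing a
simplified repeat: the mandatory copies come first, then each optional copy
wraps the next so the matcher can give up as early as possible. A sketch with
the standard library's regexp/syntax, which implements the same algorithm:

	package main

	import (
		"fmt"
		"regexp/syntax"
	)

	func main() {
		re, err := syntax.Parse(`x{2,5}`, syntax.Perl)
		if err != nil {
			panic(err)
		}
		// Two mandatory copies, then nested optional suffixes.
		fmt.Println(re.Simplify()) // expected: xx(?:x(?:xx?)?)?
	}
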
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index 41cd4f5030e4f9dd7580dd08ae712c016cc04298..bbe7decf09bc61c5b2166c9ef3be4754d5df8913 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -148,22 +148,20 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
 	}
 
 	md, ok := ServerMetadataFromContext(ctx)
-	if !ok {
-		grpclog.Error("Failed to extract ServerMetadata from context")
-	}
-
-	handleForwardResponseServerMetadata(w, mux, md)
-
-	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
-	// Unless the request includes a TE header field indicating "trailers"
-	// is acceptable, as described in Section 4.3, a server SHOULD NOT
-	// generate trailer fields that it believes are necessary for the user
-	// agent to receive.
-	doForwardTrailers := requestAcceptsTrailers(r)
-
-	if doForwardTrailers {
-		handleForwardResponseTrailerHeader(w, mux, md)
-		w.Header().Set("Transfer-Encoding", "chunked")
+	if ok {
+		handleForwardResponseServerMetadata(w, mux, md)
+
+		// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+		// Unless the request includes a TE header field indicating "trailers"
+		// is acceptable, as described in Section 4.3, a server SHOULD NOT
+		// generate trailer fields that it believes are necessary for the user
+		// agent to receive.
+		doForwardTrailers := requestAcceptsTrailers(r)
+
+		if doForwardTrailers {
+			handleForwardResponseTrailerHeader(w, mux, md)
+			w.Header().Set("Transfer-Encoding", "chunked")
+		}
 	}
 
 	st := HTTPStatusFromCode(s.Code())
@@ -176,7 +174,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
 		grpclog.Errorf("Failed to write response: %v", err)
 	}
 
-	if doForwardTrailers {
+	if ok && requestAcceptsTrailers(r) {
 		handleForwardResponseTrailer(w, mux, md)
 	}
 }
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
index f0727cf7c06c8fac6089f427f071e36f1dd74557..2f0b9e9e0f8691d89d455222e8025a89044460f6 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -153,12 +153,10 @@ type responseBody interface {
 // ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
 func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
 	md, ok := ServerMetadataFromContext(ctx)
-	if !ok {
-		grpclog.Error("Failed to extract ServerMetadata from context")
+	if ok {
+		handleForwardResponseServerMetadata(w, mux, md)
 	}
 
-	handleForwardResponseServerMetadata(w, mux, md)
-
 	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
 	// Unless the request includes a TE header field indicating "trailers"
 	// is acceptable, as described in Section 4.3, a server SHOULD NOT
@@ -166,7 +164,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
 	// agent to receive.
 	doForwardTrailers := requestAcceptsTrailers(req)
 
-	if doForwardTrailers {
+	if ok && doForwardTrailers {
 		handleForwardResponseTrailerHeader(w, mux, md)
 		w.Header().Set("Transfer-Encoding", "chunked")
 	}
@@ -204,7 +202,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
 		grpclog.Errorf("Failed to write response: %v", err)
 	}
 
-	if doForwardTrailers {
+	if ok && doForwardTrailers {
 		handleForwardResponseTrailer(w, mux, md)
 	}
 }
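
Both gateway fixes gate trailer forwarding on the same two conditions: server
metadata must be present in the context, and the client must have advertised
trailer support per RFC 7230. The client side of that contract is just the TE
request header; a minimal sketch (the header token comes from the RFC, not a
gateway-specific API):

	package main

	import "net/http"

	func newTrailerAwareRequest(url string) (*http.Request, error) {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		// requestAcceptsTrailers looks for this token before the gateway
		// emits trailer headers and sets Transfer-Encoding: chunked.
		req.Header.Set("TE", "trailers")
		return req, nil
	}
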
diff --git a/vendor/github.com/libp2p/go-reuseport/control_freebsd.go b/vendor/github.com/libp2p/go-reuseport/control_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..cec1b11aa81e5d9529f161b15aa49553edaa88c0
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_freebsd.go
@@ -0,0 +1,27 @@
+//go:build freebsd
+
+package reuseport
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func Control(network, address string, c syscall.RawConn) (err error) {
+	controlErr := c.Control(func(fd uintptr) {
+		err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+		if err != nil {
+			return
+		}
+		err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+		if err != nil {
+			return
+		}
+		err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT_LB, 1)
+	})
+	if controlErr != nil {
+		err = controlErr
+	}
+	return
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_unix.go b/vendor/github.com/libp2p/go-reuseport/control_unix.go
index 4197d1f741e7a54bc1faeaad2626b99e269d1554..e80688b5efb9cea8156cbb3c0376672d31c5ddf3 100644
--- a/vendor/github.com/libp2p/go-reuseport/control_unix.go
+++ b/vendor/github.com/libp2p/go-reuseport/control_unix.go
@@ -1,4 +1,4 @@
-//go:build !plan9 && !windows && !wasm
+//go:build !plan9 && !windows && !wasm && !freebsd
 
 package reuseport
 
diff --git a/vendor/github.com/libp2p/go-reuseport/version.json b/vendor/github.com/libp2p/go-reuseport/version.json
index a654d65abc2b1c8b7658fe3f56a3dbb15831251d..372b6eab3ee0fab29ad774cfbbe4fa47ddbfc1cb 100644
--- a/vendor/github.com/libp2p/go-reuseport/version.json
+++ b/vendor/github.com/libp2p/go-reuseport/version.json
@@ -1,3 +1,3 @@
 {
-  "version": "v0.3.0"
+  "version": "v0.4.0"
 }
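
The new FreeBSD control function layers SO_REUSEPORT_LB on top of the usual
SO_REUSEADDR/SO_REUSEPORT pair, which FreeBSD requires for load-balanced port
sharing. Typical usage wires Control into a net.ListenConfig; a sketch (the
package also exposes its own Listen/Dial helpers):

	package main

	import (
		"context"
		"net"

		"github.com/libp2p/go-reuseport"
	)

	func listenShared(addr string) (net.Listener, error) {
		lc := net.ListenConfig{Control: reuseport.Control}
		// Several processes may now bind addr concurrently.
		return lc.Listen(context.Background(), "tcp", addr)
	}
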
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
index ff7b27c5b203749598ebc7843a6ae06d02b089d3..e68108f8687688c78c571bfcd02b146b9238ab8d 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
@@ -8,7 +8,6 @@
 package jlexer
 
 import (
-	"reflect"
 	"unsafe"
 )
 
@@ -18,7 +17,5 @@ import (
 // chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
 // may be garbage-collected even when the string exists.
 func bytesToStr(data []byte) string {
-	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
-	shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
-	return *(*string)(unsafe.Pointer(&shdr))
+	return *(*string)(unsafe.Pointer(&data))
 }
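
The rewrite drops the long-deprecated reflect.SliceHeader/StringHeader dance:
because a string header is a prefix of a slice header, casting the slice
pointer directly yields the same zero-copy conversion. On Go 1.20+ the
sanctioned spelling would be unsafe.String (a sketch, not what the vendored
file uses):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// bytesToStr shares data's backing array with the returned string;
	// the caller must not mutate data afterwards.
	func bytesToStr(data []byte) string {
		return unsafe.String(unsafe.SliceData(data), len(data))
	}

	func main() {
		fmt.Println(bytesToStr([]byte("zero-copy")))
	}
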
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
index b5f5e26132934ec5a4a4027531d7a0a958549080..a27705b12b54df96e36fd1bbe175e0838a7df418 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -19,21 +19,21 @@ import (
 	"github.com/josharian/intern"
 )
 
-// tokenKind determines type of a token.
-type tokenKind byte
+// TokenKind determines type of a token.
+type TokenKind byte
 
 const (
-	tokenUndef  tokenKind = iota // No token.
-	tokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
-	tokenString                  // A string literal, e.g. "abc\u1234"
-	tokenNumber                  // Number literal, e.g. 1.5e5
-	tokenBool                    // Boolean literal: true or false.
-	tokenNull                    // null keyword.
+	TokenUndef  TokenKind = iota // No token.
+	TokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
+	TokenString                  // A string literal, e.g. "abc\u1234"
+	TokenNumber                  // Number literal, e.g. 1.5e5
+	TokenBool                    // Boolean literal: true or false.
+	TokenNull                    // null keyword.
 )
 
 // token describes a single token: type, position in the input and value.
 type token struct {
-	kind tokenKind // Type of a token.
+	kind TokenKind // Type of a token.
 
 	boolValue       bool   // Value if a boolean literal token.
 	byteValueCloned bool   // true if byteValue was allocated and does not refer to original json body
@@ -47,7 +47,7 @@ type Lexer struct {
 
 	start int   // Start of the current token.
 	pos   int   // Current unscanned position in the input stream.
-	token token // Last scanned token, if token.kind != tokenUndef.
+	token token // Last scanned token, if token.kind != TokenUndef.
 
 	firstElement bool // Whether current element is the first in array or an object.
 	wantSep      byte // A comma or a colon character, which need to occur before a token.
@@ -59,7 +59,7 @@ type Lexer struct {
 
 // FetchToken scans the input for the next token.
 func (r *Lexer) FetchToken() {
-	r.token.kind = tokenUndef
+	r.token.kind = TokenUndef
 	r.start = r.pos
 
 	// Check if r.Data has r.pos element
@@ -90,7 +90,7 @@ func (r *Lexer) FetchToken() {
 				r.errSyntax()
 			}
 
-			r.token.kind = tokenString
+			r.token.kind = TokenString
 			r.fetchString()
 			return
 
@@ -99,7 +99,7 @@ func (r *Lexer) FetchToken() {
 				r.errSyntax()
 			}
 			r.firstElement = true
-			r.token.kind = tokenDelim
+			r.token.kind = TokenDelim
 			r.token.delimValue = r.Data[r.pos]
 			r.pos++
 			return
@@ -109,7 +109,7 @@ func (r *Lexer) FetchToken() {
 				r.errSyntax()
 			}
 			r.wantSep = 0
-			r.token.kind = tokenDelim
+			r.token.kind = TokenDelim
 			r.token.delimValue = r.Data[r.pos]
 			r.pos++
 			return
@@ -118,7 +118,7 @@ func (r *Lexer) FetchToken() {
 			if r.wantSep != 0 {
 				r.errSyntax()
 			}
-			r.token.kind = tokenNumber
+			r.token.kind = TokenNumber
 			r.fetchNumber()
 			return
 
@@ -127,7 +127,7 @@ func (r *Lexer) FetchToken() {
 				r.errSyntax()
 			}
 
-			r.token.kind = tokenNull
+			r.token.kind = TokenNull
 			r.fetchNull()
 			return
 
@@ -136,7 +136,7 @@ func (r *Lexer) FetchToken() {
 				r.errSyntax()
 			}
 
-			r.token.kind = tokenBool
+			r.token.kind = TokenBool
 			r.token.boolValue = true
 			r.fetchTrue()
 			return
@@ -146,7 +146,7 @@ func (r *Lexer) FetchToken() {
 				r.errSyntax()
 			}
 
-			r.token.kind = tokenBool
+			r.token.kind = TokenBool
 			r.token.boolValue = false
 			r.fetchFalse()
 			return
@@ -391,7 +391,7 @@ func (r *Lexer) fetchString() {
 
 // scanToken scans the next token if no token is currently available in the lexer.
 func (r *Lexer) scanToken() {
-	if r.token.kind != tokenUndef || r.fatalError != nil {
+	if r.token.kind != TokenUndef || r.fatalError != nil {
 		return
 	}
 
@@ -400,7 +400,7 @@ func (r *Lexer) scanToken() {
 
 // consume resets the current token to allow scanning the next one.
 func (r *Lexer) consume() {
-	r.token.kind = tokenUndef
+	r.token.kind = TokenUndef
 	r.token.byteValueCloned = false
 	r.token.delimValue = 0
 }
@@ -443,10 +443,10 @@ func (r *Lexer) errInvalidToken(expected string) {
 		switch expected {
 		case "[":
 			r.token.delimValue = ']'
-			r.token.kind = tokenDelim
+			r.token.kind = TokenDelim
 		case "{":
 			r.token.delimValue = '}'
-			r.token.kind = tokenDelim
+			r.token.kind = TokenDelim
 		}
 		r.addNonfatalError(&LexerError{
 			Reason: fmt.Sprintf("expected %s", expected),
@@ -475,7 +475,7 @@ func (r *Lexer) GetPos() int {
 
 // Delim consumes a token and verifies that it is the given delimiter.
 func (r *Lexer) Delim(c byte) {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
 
@@ -489,7 +489,7 @@ func (r *Lexer) Delim(c byte) {
 
 // IsDelim returns true if there was no scanning error and next token is the given delimiter.
 func (r *Lexer) IsDelim(c byte) bool {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
 	return !r.Ok() || r.token.delimValue == c
@@ -497,10 +497,10 @@ func (r *Lexer) IsDelim(c byte) bool {
 
 // Null verifies that the next token is null and consumes it.
 func (r *Lexer) Null() {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	if !r.Ok() || r.token.kind != tokenNull {
+	if !r.Ok() || r.token.kind != TokenNull {
 		r.errInvalidToken("null")
 	}
 	r.consume()
@@ -508,15 +508,15 @@ func (r *Lexer) Null() {
 
 // IsNull returns true if the next token is a null keyword.
 func (r *Lexer) IsNull() bool {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	return r.Ok() && r.token.kind == tokenNull
+	return r.Ok() && r.token.kind == TokenNull
 }
 
 // Skip skips a single token.
 func (r *Lexer) Skip() {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
 	r.consume()
@@ -621,10 +621,10 @@ func (r *Lexer) Consumed() {
 }
 
 func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	if !r.Ok() || r.token.kind != tokenString {
+	if !r.Ok() || r.token.kind != TokenString {
 		r.errInvalidToken("string")
 		return "", nil
 	}
@@ -664,10 +664,10 @@ func (r *Lexer) UnsafeFieldName(skipUnescape bool) string {
 
 // String reads a string literal.
 func (r *Lexer) String() string {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	if !r.Ok() || r.token.kind != tokenString {
+	if !r.Ok() || r.token.kind != TokenString {
 		r.errInvalidToken("string")
 		return ""
 	}
@@ -687,10 +687,10 @@ func (r *Lexer) String() string {
 
 // StringIntern reads a string literal, and performs string interning on it.
 func (r *Lexer) StringIntern() string {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	if !r.Ok() || r.token.kind != tokenString {
+	if !r.Ok() || r.token.kind != TokenString {
 		r.errInvalidToken("string")
 		return ""
 	}
@@ -705,10 +705,10 @@ func (r *Lexer) StringIntern() string {
 
 // Bytes reads a string literal and base64 decodes it into a byte slice.
 func (r *Lexer) Bytes() []byte {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	if !r.Ok() || r.token.kind != tokenString {
+	if !r.Ok() || r.token.kind != TokenString {
 		r.errInvalidToken("string")
 		return nil
 	}
@@ -731,10 +731,10 @@ func (r *Lexer) Bytes() []byte {
 
 // Bool reads a true or false boolean keyword.
 func (r *Lexer) Bool() bool {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	if !r.Ok() || r.token.kind != tokenBool {
+	if !r.Ok() || r.token.kind != TokenBool {
 		r.errInvalidToken("bool")
 		return false
 	}
@@ -744,10 +744,10 @@ func (r *Lexer) Bool() bool {
 }
 
 func (r *Lexer) number() string {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
-	if !r.Ok() || r.token.kind != tokenNumber {
+	if !r.Ok() || r.token.kind != TokenNumber {
 		r.errInvalidToken("number")
 		return ""
 	}
@@ -1151,7 +1151,7 @@ func (r *Lexer) GetNonFatalErrors() []*LexerError {
 // JsonNumber fetches a json.Number, as defined in the 'encoding/json' package.
 // Int, float, and string tokens are all valid values; null yields an empty json.Number.
 func (r *Lexer) JsonNumber() json.Number {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
 	if !r.Ok() {
@@ -1160,11 +1160,11 @@ func (r *Lexer) JsonNumber() json.Number {
 	}
 
 	switch r.token.kind {
-	case tokenString:
+	case TokenString:
 		return json.Number(r.String())
-	case tokenNumber:
+	case TokenNumber:
 		return json.Number(r.Raw())
-	case tokenNull:
+	case TokenNull:
 		r.Null()
 		return json.Number("")
 	default:
@@ -1175,7 +1175,7 @@ func (r *Lexer) JsonNumber() json.Number {
 
 // Interface fetches an interface{} analogous to the 'encoding/json' package.
 func (r *Lexer) Interface() interface{} {
-	if r.token.kind == tokenUndef && r.Ok() {
+	if r.token.kind == TokenUndef && r.Ok() {
 		r.FetchToken()
 	}
 
@@ -1183,13 +1183,13 @@ func (r *Lexer) Interface() interface{} {
 		return nil
 	}
 	switch r.token.kind {
-	case tokenString:
+	case TokenString:
 		return r.String()
-	case tokenNumber:
+	case TokenNumber:
 		return r.Float64()
-	case tokenBool:
+	case TokenBool:
 		return r.Bool()
-	case tokenNull:
+	case TokenNull:
 		r.Null()
 		return nil
 	}
@@ -1242,3 +1242,16 @@ func (r *Lexer) WantColon() {
 	r.wantSep = ':'
 	r.firstElement = false
 }
+
+// CurrentToken returns the current token kind if there were no errors, and TokenUndef otherwise.
+func (r *Lexer) CurrentToken() TokenKind {
+	if r.token.kind == TokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+
+	if !r.Ok() {
+		return TokenUndef
+	}
+
+	return r.token.kind
+}
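
Exporting TokenKind and adding CurrentToken lets callers peek at the next
token without consuming it, which previously required package-internal access.
A sketch of a lookahead helper built on the new API (readMaybeString is a
hypothetical name, not part of easyjson):

	package jsonutil

	import "github.com/mailru/easyjson/jlexer"

	// readMaybeString reads the next value as a string,
	// treating JSON null as the empty string.
	func readMaybeString(lex *jlexer.Lexer) string {
		switch lex.CurrentToken() {
		case jlexer.TokenNull:
			lex.Null()
			return ""
		case jlexer.TokenString:
			return lex.String()
		default:
			lex.Skip()
			return ""
		}
	}
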
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
index 2c5b20105bb90926e780775bb341c9dff1cc2689..34b0ade46852dc65635b60fdf32c88a24e74d2a4 100644
--- a/vendor/github.com/mailru/easyjson/jwriter/writer.go
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -67,6 +67,18 @@ func (w *Writer) RawString(s string) {
 	w.Buffer.AppendString(s)
 }
 
+// RawBytesString appends the given bytes as a quoted JSON string to the buffer, or sets the error if it is given.
+func (w *Writer) RawBytesString(data []byte, err error) {
+	switch {
+	case w.Error != nil:
+		return
+	case err != nil:
+		w.Error = err
+	default:
+		w.String(string(data))
+	}
+}
+
 // Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
 // calling with results of MarshalJSON-like functions.
 func (w *Writer) Raw(data []byte, err error) {
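
RawBytesString complements Raw: Raw trusts its input to already be valid JSON,
while RawBytesString re-encodes the bytes as a quoted, escaped JSON string,
and both short-circuit if an error is pending or passed in. A usage sketch:

	package main

	import (
		"fmt"

		"github.com/mailru/easyjson/jwriter"
	)

	func main() {
		var w jwriter.Writer
		// Encoded as a quoted JSON string, with escaping applied.
		w.RawBytesString([]byte(`say "hi"`), nil)
		out, err := w.BuildBytes()
		fmt.Println(string(out), err) // "say \"hi\"" <nil>
	}
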
diff --git a/vendor/github.com/minio/minio-go/v7/api-append-object.go b/vendor/github.com/minio/minio-go/v7/api-append-object.go
new file mode 100644
index 0000000000000000000000000000000000000000..fca08c3733e128b34243ddac0995e3ee7d0bf5c0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-append-object.go
@@ -0,0 +1,226 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2025 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// AppendObjectOptions https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
+type AppendObjectOptions struct {
+	// Provide a progress reader to indicate the current append() progress.
+	Progress io.Reader
+	// ChunkSize indicates the maximum append() size.
+	// It is useful when you want to control how much data
+	// each append() sends to the server while the input
+	// io.Reader carries a longer stream.
+	ChunkSize uint64
+	// Aggressively disable the sha256 payload; it is automatically
+	// turned off for TLS-supporting endpoints. Useful in benchmarks
+	// where you are interested in peak numbers.
+	DisableContentSha256 bool
+
+	customHeaders http.Header
+	checksumType  ChecksumType
+}
+
+// Header returns the custom header for AppendObject API
+func (opts AppendObjectOptions) Header() (header http.Header) {
+	header = make(http.Header)
+	for k, v := range opts.customHeaders {
+		header[k] = v
+	}
+	return header
+}
+
+func (opts *AppendObjectOptions) setWriteOffset(offset int64) {
+	if len(opts.customHeaders) == 0 {
+		opts.customHeaders = make(http.Header)
+	}
+	opts.customHeaders["x-amz-write-offset-bytes"] = []string{strconv.FormatInt(offset, 10)}
+}
+
+func (opts *AppendObjectOptions) setChecksumParams(info ObjectInfo) {
+	if len(opts.customHeaders) == 0 {
+		opts.customHeaders = make(http.Header)
+	}
+	fullObject := info.ChecksumMode == ChecksumFullObjectMode.String()
+	switch {
+	case info.ChecksumCRC32 != "":
+		if fullObject {
+			opts.checksumType = ChecksumFullObjectCRC32
+		}
+	case info.ChecksumCRC32C != "":
+		if fullObject {
+			opts.checksumType = ChecksumFullObjectCRC32C
+		}
+	case info.ChecksumCRC64NVME != "":
+		// CRC64NVME only has a full object variant
+		// so it does not carry any special full object
+		// modifier
+		opts.checksumType = ChecksumCRC64NVME
+	}
+}
+
+func (opts AppendObjectOptions) validate(c *Client) (err error) {
+	if opts.ChunkSize > maxPartSize {
+		return errInvalidArgument("Append chunkSize cannot be larger than max part size allowed")
+	}
+	switch {
+	case !c.trailingHeaderSupport:
+		return errInvalidArgument("AppendObject() requires Client with TrailingHeaders enabled")
+	case c.overrideSignerType.IsV2():
+		return errInvalidArgument("AppendObject() cannot be used with v2 signatures")
+	case s3utils.IsGoogleEndpoint(*c.endpointURL):
+		return errInvalidArgument("AppendObject() cannot be used with GCS endpoints")
+	}
+
+	return nil
+}
+
+// appendObjectDo - executes the append object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts AppendObjectOptions) (UploadInfo, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Set headers.
+	customHeader := opts.Header()
+
+	// Populate request metadata.
+	reqMetadata := requestMetadata{
+		bucketName:    bucketName,
+		objectName:    objectName,
+		customHeader:  customHeader,
+		contentBody:   reader,
+		contentLength: size,
+		streamSha256:  !opts.DisableContentSha256,
+	}
+
+	if opts.checksumType.IsSet() {
+		reqMetadata.addCrc = &opts.checksumType
+	}
+
+	// Execute PUT on the objectName.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	h := resp.Header
+
+	// When AppendObject() is used, S3 Express will return final object size as x-amz-object-size
+	if amzSize := h.Get("x-amz-object-size"); amzSize != "" {
+		size, err = strconv.ParseInt(amzSize, 10, 64)
+		if err != nil {
+			return UploadInfo{}, err
+		}
+	}
+
+	return UploadInfo{
+		Bucket: bucketName,
+		Key:    objectName,
+		ETag:   trimEtag(h.Get("ETag")),
+		Size:   size,
+
+		// Checksum values
+		ChecksumCRC32:     h.Get(ChecksumCRC32.Key()),
+		ChecksumCRC32C:    h.Get(ChecksumCRC32C.Key()),
+		ChecksumSHA1:      h.Get(ChecksumSHA1.Key()),
+		ChecksumSHA256:    h.Get(ChecksumSHA256.Key()),
+		ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+		ChecksumMode:      h.Get(ChecksumFullObjectMode.Key()),
+	}, nil
+}
+
+// AppendObject - S3 Express Zone https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
+func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+	opts AppendObjectOptions,
+) (info UploadInfo, err error) {
+	if objectSize < 0 && opts.ChunkSize == 0 {
+		return UploadInfo{}, errors.New("object size must be provided when no chunk size is provided")
+	}
+
+	if err = opts.validate(c); err != nil {
+		return UploadInfo{}, err
+	}
+
+	oinfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{Checksum: true})
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	if oinfo.ChecksumMode != ChecksumFullObjectMode.String() {
+		return UploadInfo{}, fmt.Errorf("append API is not allowed on objects that are not full_object checksum type: %s", oinfo.ChecksumMode)
+	}
+	opts.setChecksumParams(oinfo)   // set the appropriate checksum params based on the existing object checksum metadata.
+	opts.setWriteOffset(oinfo.Size) // First append must set the current object size as the offset.
+
+	if opts.ChunkSize > 0 {
+		finalObjSize := int64(-1)
+		if objectSize > 0 {
+			finalObjSize = info.Size + objectSize
+		}
+		totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(finalObjSize, opts.ChunkSize)
+		if err != nil {
+			return UploadInfo{}, err
+		}
+		buf := make([]byte, partSize)
+		var partNumber int
+		for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+			// Proceed to upload the part.
+			if partNumber == totalPartsCount {
+				partSize = lastPartSize
+			}
+			n, err := readFull(reader, buf)
+			if err != nil {
+				return info, err
+			}
+			if n != int(partSize) {
+				return info, io.ErrUnexpectedEOF
+			}
+			rd := newHook(bytes.NewReader(buf[:n]), opts.Progress)
+			uinfo, err := c.appendObjectDo(ctx, bucketName, objectName, rd, partSize, opts)
+			if err != nil {
+				return info, err
+			}
+			opts.setWriteOffset(uinfo.Size)
+		}
+	}
+
+	rd := newHook(reader, opts.Progress)
+	return c.appendObjectDo(ctx, bucketName, objectName, rd, objectSize, opts)
+}
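
AppendObject stats the object first to learn its current size (the mandatory
write offset) and checksum mode, then streams the new data either in one shot
or in ChunkSize pieces, advancing the offset after every part. A hedged usage
sketch; endpoint, credentials, bucket, and file names are placeholders, and
TrailingHeaders must be enabled on the client or validate() rejects the call:

	package main

	import (
		"context"
		"log"
		"os"

		"github.com/minio/minio-go/v7"
		"github.com/minio/minio-go/v7/pkg/credentials"
	)

	func main() {
		client, err := minio.New("s3express.example.com", &minio.Options{
			Creds:           credentials.NewStaticV4("ACCESS", "SECRET", ""),
			Secure:          true,
			TrailingHeaders: true, // required by AppendObjectOptions.validate
		})
		if err != nil {
			log.Fatal(err)
		}
		f, err := os.Open("tail.bin")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		st, err := f.Stat()
		if err != nil {
			log.Fatal(err)
		}
		info, err := client.AppendObject(context.Background(),
			"mybucket", "myobject", f, st.Size(), minio.AppendObjectOptions{})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("object size after append: %d", info.Size)
	}
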
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
index 33811b98fe87d39ccb2ca981dbfeaa174110bd4d..b1e5b0aae66af110b667e192d1a1c3cdac850347 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
@@ -157,13 +157,6 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
 			return
 		}
 
-		// Continuously run and listen on bucket notification.
-		// Create a done channel to control 'ListObjects' go routine.
-		retryDoneCh := make(chan struct{}, 1)
-
-		// Indicate to our routine to exit cleanly upon return.
-		defer close(retryDoneCh)
-
 		// Prepare urlValues to pass into the request on every loop
 		urlValues := make(url.Values)
 		urlValues.Set("ping", "10")
@@ -172,7 +165,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
 		urlValues["events"] = events
 
 		// Wait on the jitter retry loop.
-		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter) {
 			// Execute GET on bucket to list objects.
 			resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
 				bucketName:       bucketName,
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
index 2118c7c77fbe6734eb265c325c7c0d901bccfcad..39ff9d27c16a9f523b2a5ed055e751b970b1e691 100644
--- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -148,6 +148,7 @@ type UploadInfo struct {
 	ChecksumSHA1      string
 	ChecksumSHA256    string
 	ChecksumCRC64NVME string
+	ChecksumMode      string
 }
 
 // RestoreInfo contains information of the restore operation of an archived object
@@ -223,6 +224,7 @@ type ObjectInfo struct {
 	ChecksumSHA1      string
 	ChecksumSHA256    string
 	ChecksumCRC64NVME string
+	ChecksumMode      string
 
 	Internal *struct {
 		K int // Data blocks
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 03bd34f76bac04edecfc05957f1729bc8a943d81..84bc19b28f9913f7f66ee7e095df543b1b74f3e7 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -457,5 +457,6 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
 		ChecksumCRC32:     completeMultipartUploadResult.ChecksumCRC32,
 		ChecksumCRC32C:    completeMultipartUploadResult.ChecksumCRC32C,
 		ChecksumCRC64NVME: completeMultipartUploadResult.ChecksumCRC64NVME,
+		ChecksumMode:      completeMultipartUploadResult.ChecksumType,
 	}, nil
 }
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
index 82c0ae9e4bc1478c4ace7cd2a3ccf47c6f964f8d..987a3c6928020190dfd061f235c3013752185ca0 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -805,5 +805,6 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 		ChecksumSHA1:      h.Get(ChecksumSHA1.Key()),
 		ChecksumSHA256:    h.Get(ChecksumSHA256.Key()),
 		ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+		ChecksumMode:      h.Get(ChecksumFullObjectMode.Key()),
 	}, nil
 }
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index 08a5a7b6ede5c1c9d5717276acf1bebc1535d016..3204263dc725804b6d6753e041c7ece74f7b41f5 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -366,6 +366,7 @@ type completeMultipartUploadResult struct {
 	ChecksumSHA1      string
 	ChecksumSHA256    string
 	ChecksumCRC64NVME string
+	ChecksumType      string
 }
 
 // CompletePart sub container lists individual part numbers and their
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index b64f576159c44eb5672dde2d5b3d1901d366f400..1e457d807d50af719d8a6d68f4a8423371988225 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -155,7 +155,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.89"
+	libraryVersion = "v7.0.90"
 )
 
 // User Agent should always following the below style.
@@ -660,13 +660,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 		metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
 	}
 
-	// Create cancel context to control 'newRetryTimer' go routine.
-	retryCtx, cancel := context.WithCancel(ctx)
-
-	// Indicate to our routine to exit cleanly upon return.
-	defer cancel()
-
-	for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
+	for range c.newRetryTimer(ctx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
 		// Retry executes the following function body if request has an
 		// error until maxRetries have been exhausted, retry attempts are
 		// performed after waiting for a given period of time in a
@@ -779,7 +773,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 	}
 
 	// Return an error when retry is canceled or deadlined
-	if e := retryCtx.Err(); e != nil {
+	if e := ctx.Err(); e != nil {
 		return nil, e
 	}
 
@@ -909,6 +903,11 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
 
 	// For anonymous requests just return.
 	if signerType.IsAnonymous() {
+		if len(metadata.trailer) > 0 {
+			req.Header.Set("X-Amz-Content-Sha256", unsignedPayloadTrailer)
+			return signer.UnsignedTrailer(*req, metadata.trailer), nil
+		}
+
 		return req, nil
 	}
 
@@ -1066,3 +1065,11 @@ func (c *Client) CredContext() *credentials.CredContext {
 		Endpoint: c.endpointURL.String(),
 	}
 }
+
+// GetCreds returns the access creds for the client
+func (c *Client) GetCreds() (credentials.Value, error) {
+	if c.credsProvider == nil {
+		return credentials.Value{}, errors.New("no credentials provider")
+	}
+	return c.credsProvider.GetWithContext(c.CredContext())
+}
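The new GetCreds accessor resolves the configured provider chain once and returns the raw keys; a short sketch (endpoint and keys are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Returns an error if the client was built without a credentials provider.
	creds, err := client.GetCreds()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("signing with access key %s", creds.AccessKeyID)
}
```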
diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go
index c7456cda2e6b14c0bae525e07eed9be22a0f614c..5c24bf64a59305d944899bee92f4d272308fcd9a 100644
--- a/vendor/github.com/minio/minio-go/v7/checksum.go
+++ b/vendor/github.com/minio/minio-go/v7/checksum.go
@@ -34,6 +34,43 @@ import (
 	"github.com/minio/crc64nvme"
 )
 
+// ChecksumMode contains information about the checksum mode on the object
+type ChecksumMode uint32
+
+const (
+	// ChecksumFullObjectMode Full object checksum `csumCombine(csum1, csum2, ..., csumN)`
+	ChecksumFullObjectMode ChecksumMode = 1 << iota
+
+	// ChecksumCompositeMode Composite checksum `csum([csum1 + csum2 ... + csumN])`
+	ChecksumCompositeMode
+
+	// Keep after all valid checksums
+	checksumLastMode
+
+	// checksumModeMask is a mask for valid checksum mode types.
+	checksumModeMask = checksumLastMode - 1
+)
+
+// Is reports whether all modes in t are set in c.
+func (c ChecksumMode) Is(t ChecksumMode) bool {
+	return c&t == t
+}
+
+// Key returns the header key.
+func (c ChecksumMode) Key() string {
+	return amzChecksumMode
+}
+
+func (c ChecksumMode) String() string {
+	switch c & checksumModeMask {
+	case ChecksumFullObjectMode:
+		return "FULL_OBJECT"
+	case ChecksumCompositeMode:
+		return "COMPOSITE"
+	}
+	return ""
+}
+
 // ChecksumType contains information about the checksum type.
 type ChecksumType uint32
 
@@ -75,6 +112,7 @@ const (
 	amzChecksumSHA1      = "x-amz-checksum-sha1"
 	amzChecksumSHA256    = "x-amz-checksum-sha256"
 	amzChecksumCRC64NVME = "x-amz-checksum-crc64nvme"
+	amzChecksumMode      = "x-amz-checksum-type"
 )
 
 // Base returns the base type, without modifiers.
@@ -397,7 +435,7 @@ func addAutoChecksumHeaders(opts *PutObjectOptions) {
 	}
 	opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
 	if opts.AutoChecksum.FullObjectRequested() {
-		opts.UserMetadata["X-Amz-Checksum-Type"] = "FULL_OBJECT"
+		opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String()
 	}
 }
 
@@ -414,7 +452,10 @@ func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) {
 	} else if opts.AutoChecksum.CanMergeCRC() {
 		crc, err := opts.AutoChecksum.FullObjectChecksum(allParts)
 		if err == nil {
-			opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): crc.Encoded(), "X-Amz-Checksum-Type": "FULL_OBJECT"}
+			opts.UserMetadata = map[string]string{
+				opts.AutoChecksum.KeyCapitalized(): crc.Encoded(),
+				amzChecksumMode:                    ChecksumFullObjectMode.String(),
+			}
 		}
 	}
 }
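
The new ChecksumMode type round-trips the x-amz-checksum-type header as FULL_OBJECT or COMPOSITE. A sketch of the pre-flight check AppendObject performs, written as a standalone helper (the package and function names are illustrative):

```go
package appendguard

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// CanAppend reports whether the object may be appended to: only objects
// carrying a full-object checksum qualify.
func CanAppend(ctx context.Context, client *minio.Client, bucket, object string) error {
	oinfo, err := client.StatObject(ctx, bucket, object, minio.StatObjectOptions{Checksum: true})
	if err != nil {
		return err
	}
	if oinfo.ChecksumMode != minio.ChecksumFullObjectMode.String() {
		return fmt.Errorf("checksum mode %q does not allow append", oinfo.ChecksumMode)
	}
	return nil
}
```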
diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go
index 07bc7dbcfc8f69c0dac98feb2fd9685b8e90bc42..61268a1045d92f6587f9903707a4c56f1ff9dbb8 100644
--- a/vendor/github.com/minio/minio-go/v7/hook-reader.go
+++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go
@@ -20,7 +20,6 @@ package minio
 import (
 	"fmt"
 	"io"
-	"sync"
 )
 
 // hookReader hooks additional reader in the source stream. It is
@@ -28,7 +27,6 @@ import (
 // notified about the exact number of bytes read from the primary
 // source on each Read operation.
 type hookReader struct {
-	mu     sync.RWMutex
 	source io.Reader
 	hook   io.Reader
 }
@@ -36,9 +34,6 @@ type hookReader struct {
 // Seek implements io.Seeker. Seeks source first, and if necessary
 // seeks hook if Seek method is appropriately found.
 func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
-	hr.mu.Lock()
-	defer hr.mu.Unlock()
-
 	// Verify for source has embedded Seeker, use it.
 	sourceSeeker, ok := hr.source.(io.Seeker)
 	if ok {
@@ -70,9 +65,6 @@ func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
 // value 'n' number of bytes are reported through the hook. Returns
 // error for all non io.EOF conditions.
 func (hr *hookReader) Read(b []byte) (n int, err error) {
-	hr.mu.RLock()
-	defer hr.mu.RUnlock()
-
 	n, err = hr.source.Read(b)
 	if err != nil && err != io.EOF {
 		return n, err
@@ -92,7 +84,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
 // reports the data read from the source to the hook.
 func newHook(source, hook io.Reader) io.Reader {
 	if hook == nil {
-		return &hookReader{source: source}
+		return source
 	}
 	return &hookReader{
 		source: source,
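
Dropping the mutex also changed newHook to return the source untouched when no progress hook is supplied, which keeps optional interfaces of the source visible to later type assertions. A small self-contained illustration of that effect (wrap is a stand-in, not the minio-go function):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// wrap mirrors the fixed newHook: with no hook, hand back the source
// unchanged so interfaces such as io.Seeker stay discoverable.
func wrap(source, hook io.Reader) io.Reader {
	if hook == nil {
		return source // previously: &hookReader{source: source}
	}
	return io.MultiReader(source) // opaque wrapper, stands in for hookReader
}

func main() {
	r := wrap(strings.NewReader("payload"), nil)
	_, seekable := r.(io.Seeker)
	fmt.Println("seekable:", seekable) // true: the source was not wrapped
}
```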
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
index 09ece53a0c9380e27c48167a76226ee82d4983d5..f6d459edcc2f9035477dc939547d080295d2be05 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -338,6 +338,29 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati
 	return &req
 }
 
+// UnsignedTrailer will do chunked encoding with a custom trailer.
+func UnsignedTrailer(req http.Request, trailer http.Header) *http.Request {
+	if len(trailer) == 0 {
+		return &req
+	}
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Set x-amz-date.
+	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+	for k := range trailer {
+		req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+	}
+
+	req.Header.Set("Content-Encoding", "aws-chunked")
+	req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
+
+	// Use custom chunked encoding.
+	req.Trailer = trailer
+	return StreamingUnsignedV4(&req, "", req.ContentLength, t)
+}
+
 // SignV4 sign the request before Do(), in accordance with
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
 func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
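
A sketch of driving the new signer helper directly; the URL and trailer value are placeholders. UnsignedTrailer advertises the trailer keys via X-Amz-Trailer, switches the body to aws-chunked encoding, and defers the actual checksum to the HTTP trailer written after the payload:

```go
package main

import (
	"bytes"
	"log"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/signer"
)

func main() {
	payload := []byte("hello")
	req, err := http.NewRequest(http.MethodPut, "https://bucket.example.com/object", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	req.ContentLength = int64(len(payload))

	// The checksum value is sent as a trailer after the body, so an empty
	// placeholder is fine here; the transport fills req.Trailer on write.
	trailer := http.Header{}
	trailer.Set("x-amz-checksum-crc32c", "")

	signed := signer.UnsignedTrailer(*req, trailer)
	log.Println(signed.Header.Get("Content-Encoding")) // aws-chunked
}
```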
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
index 81fcf16f1b912cc9ad1923aa3da136801ba4fc34..21e9fd455e58569c920d3e5ca5042fcb89712502 100644
--- a/vendor/github.com/minio/minio-go/v7/retry-continous.go
+++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go
@@ -17,12 +17,14 @@
 
 package minio
 
-import "time"
+import (
+	"iter"
+	"math"
+	"time"
+)
 
 // newRetryTimerContinous creates a timer with exponentially increasing delays forever.
-func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
-	attemptCh := make(chan int)
-
+func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] {
 	// normalize jitter to the range [0, 1.0]
 	if jitter < NoJitter {
 		jitter = NoJitter
@@ -44,26 +46,20 @@ func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitte
 		if sleep > maxSleep {
 			sleep = maxSleep
 		}
-		if jitter != NoJitter {
+		if math.Abs(jitter-NoJitter) > 1e-9 {
 			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
 		}
 		return sleep
 	}
 
-	go func() {
-		defer close(attemptCh)
+	return func(yield func(int) bool) {
 		var nextBackoff int
 		for {
-			select {
-			// Attempts starts.
-			case attemptCh <- nextBackoff:
-				nextBackoff++
-			case <-doneCh:
-				// Stop the routine.
+			if !yield(nextBackoff) {
 				return
 			}
+			nextBackoff++
 			time.Sleep(exponentialBackoffWait(nextBackoff))
 		}
-	}()
-	return attemptCh
+	}
 }
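
Both this timer and newRetryTimer in retry.go below now return a Go 1.23 iter.Seq[int] instead of a channel fed by a goroutine, so a plain for range drives the backoff loop and a break tears it down with no done channel or cancel context. A self-contained sketch of the same pattern (names are illustrative):

```go
package main

import (
	"fmt"
	"iter"
	"time"
)

// attempts yields 0, 1, 2, ... sleeping between yields; it stops the
// moment the consumer breaks out of its for-range loop.
func attempts(delay time.Duration) iter.Seq[int] {
	return func(yield func(int) bool) {
		for i := 0; ; i++ {
			if !yield(i) {
				return // consumer broke out; nothing to clean up
			}
			time.Sleep(delay)
		}
	}
}

func main() {
	for n := range attempts(10 * time.Millisecond) {
		fmt.Println("attempt", n)
		if n == 2 {
			break // ends the sequence immediately
		}
	}
}
```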
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
index ed954017ca29c1cc38668959a37ee17659c59742..b83d1b2e5d0a6c1cf6cb0d6282b5beb1ab91bcf7 100644
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -21,6 +21,8 @@ import (
 	"context"
 	"crypto/x509"
 	"errors"
+	"iter"
+	"math"
 	"net/http"
 	"net/url"
 	"time"
@@ -45,9 +47,7 @@ var DefaultRetryCap = time.Second
 
 // newRetryTimer creates a timer with exponentially increasing
 // delays until the maximum retry attempts are reached.
-func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int {
-	attemptCh := make(chan int)
-
+func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] {
 	// computes the exponential backoff duration according to
 	// https://www.awsarchitectureblog.com/2015/03/backoff.html
 	exponentialBackoffWait := func(attempt int) time.Duration {
@@ -64,18 +64,22 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
 		if sleep > maxSleep {
 			sleep = maxSleep
 		}
-		if jitter != NoJitter {
+		if math.Abs(jitter-NoJitter) > 1e-9 {
 			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
 		}
 		return sleep
 	}
 
-	go func() {
-		defer close(attemptCh)
-		for i := 0; i < maxRetry; i++ {
-			select {
-			case attemptCh <- i + 1:
-			case <-ctx.Done():
+	return func(yield func(int) bool) {
+		// if context is already canceled, skip yield
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+
+		for i := range maxRetry {
+			if !yield(i) {
 				return
 			}
 
@@ -85,8 +89,7 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
 				return
 			}
 		}
-	}()
-	return attemptCh
+	}
 }
 
 // List of AWS S3 error codes which are retryable.
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
index 027bb6ce380c9624cd3fc7f062f683ccf85b366c..6024bfa5b21ca452af2eb888887dbeb60fc59255 100644
--- a/vendor/github.com/minio/minio-go/v7/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -390,6 +390,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
 		ChecksumSHA1:      h.Get(ChecksumSHA1.Key()),
 		ChecksumSHA256:    h.Get(ChecksumSHA256.Key()),
 		ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+		ChecksumMode:      h.Get(ChecksumFullObjectMode.Key()),
 	}, nil
 }
 
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go
index 3a68d2614893fcd84ddca5513e00a312742f240b..3adbe47fa43e72a1bac4de4a6558931fe07d2ca9 100644
--- a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go
@@ -1,6 +1,6 @@
 package logproto
 
-import "github.com/prometheus/prometheus/pkg/labels"
+import "github.com/prometheus/prometheus/model/labels"
 
 // Note, this is not very efficient and use should be minimized as it requires label construction on each comparison
 type SeriesIdentifiers []SeriesIdentifier
diff --git a/vendor/github.com/pion/logging/.gitignore b/vendor/github.com/pion/logging/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..6e2f206a9f6c85b9fd6f5daaa574429299af112e
--- /dev/null
+++ b/vendor/github.com/pion/logging/.gitignore
@@ -0,0 +1,28 @@
+# SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
+# SPDX-License-Identifier: MIT
+
+### JetBrains IDE ###
+#####################
+.idea/
+
+### Emacs Temporary Files ###
+#############################
+*~
+
+### Folders ###
+###############
+bin/
+vendor/
+node_modules/
+
+### Files ###
+#############
+*.ivf
+*.ogg
+tags
+cover.out
+*.sw[poe]
+*.wasm
+examples/sfu-ws/cert.pem
+examples/sfu-ws/key.pem
+wasm_exec.js
diff --git a/vendor/github.com/pion/logging/.golangci.yml b/vendor/github.com/pion/logging/.golangci.yml
index ffb0058e6c32ceb2fcb4a119aca8b4e05e03e272..50211be0ca81a371d104e2aa8f320d0e5fa51b1b 100644
--- a/vendor/github.com/pion/logging/.golangci.yml
+++ b/vendor/github.com/pion/logging/.golangci.yml
@@ -1,13 +1,138 @@
+# SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
+# SPDX-License-Identifier: MIT
+
+run:
+  timeout: 5m
+
 linters-settings:
   govet:
-    check-shadowing: true
+    enable:
+      - shadow
   misspell:
     locale: US
+  exhaustive:
+    default-signifies-exhaustive: true
+  gomodguard:
+    blocked:
+      modules:
+        - github.com/pkg/errors:
+            recommendations:
+              - errors
+  forbidigo:
+    forbid:
+      - ^fmt.Print(f|ln)?$
+      - ^log.(Panic|Fatal|Print)(f|ln)?$
+      - ^os.Exit$
+      - ^panic$
+      - ^print(ln)?$
+  varnamelen:
+    max-distance: 12
+    min-name-length: 2
+    ignore-type-assert-ok: true
+    ignore-map-index-ok: true
+    ignore-chan-recv-ok: true
+    ignore-decls:
+      - i int
+      - n int
+      - w io.Writer
+      - r io.Reader
+      - b []byte
 
 linters:
-  enable-all: true
+  enable:
+    - asciicheck       # Simple linter to check that your code does not contain non-ASCII identifiers
+    - bidichk          # Checks for dangerous unicode character sequences
+    - bodyclose        # checks whether HTTP response body is closed successfully
+    - containedctx     # containedctx is a linter that detects struct contained context.Context field
+    - contextcheck     # check the function whether use a non-inherited context
+    - cyclop           # checks function and package cyclomatic complexity
+    - decorder         # check declaration order and count of types, constants, variables and functions
+    - dogsled          # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
+    - dupl             # Tool for code clone detection
+    - durationcheck    # check for two durations multiplied together
+    - err113           # Golang linter to check the errors handling expressions
+    - errcheck         # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
+    - errchkjson       # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions where the check for the returned error can be omitted.
+    - errname          # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.
+    - errorlint        # errorlint is a linter that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
+    - exhaustive       # check exhaustiveness of enum switch statements
+    - exportloopref    # checks for pointers to enclosing loop variables
+    - forbidigo        # Forbids identifiers
+    - forcetypeassert  # finds forced type assertions
+    - funlen           # Tool for detection of long functions
+    - gci              # Gci control golang package import order and make it always deterministic.
+    - gochecknoglobals # Checks that no globals are present in Go code
+    - gocognit         # Computes and checks the cognitive complexity of functions
+    - goconst          # Finds repeated strings that could be replaced by a constant
+    - gocritic         # The most opinionated Go source code linter
+    - gocyclo          # Computes and checks the cyclomatic complexity of functions
+    - godot            # Check if comments end in a period
+    - godox            # Tool for detection of FIXME, TODO and other comment keywords
+    - gofmt            # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
+    - gofumpt          # Gofumpt checks whether code was gofumpt-ed.
+    - goheader         # Checks is file header matches to pattern
+    - goimports        # Goimports does everything that gofmt does. Additionally it checks unused imports
+    - gomoddirectives  # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
+    - goprintffuncname # Checks that printf-like functions are named with `f` at the end
+    - gosec            # Inspects source code for security problems
+    - gosimple         # Linter for Go source code that specializes in simplifying code
+    - govet            # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
+    - grouper          # An analyzer to analyze expression groups.
+    - importas         # Enforces consistent import aliases
+    - ineffassign      # Detects when assignments to existing variables are not used
+    - lll              # Reports long lines
+    - maintidx         # maintidx measures the maintainability index of each function.
+    - makezero         # Finds slice declarations with non-zero initial length
+    - misspell         # Finds commonly misspelled English words in comments
+    - nakedret         # Finds naked returns in functions greater than a specified function length
+    - nestif           # Reports deeply nested if statements
+    - nilerr           # Finds the code that returns nil even if it checks that the error is not nil.
+    - nilnil           # Checks that there is no simultaneous return of `nil` error and an invalid value.
+    - nlreturn         # nlreturn checks for a new line before return and branch statements to increase code clarity
+    - noctx            # noctx finds sending http request without context.Context
+    - predeclared      # find code that shadows one of Go's predeclared identifiers
+    - revive           # golint replacement, finds style mistakes
+    - staticcheck      # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
+    - stylecheck       # Stylecheck is a replacement for golint
+    - tagliatelle      # Checks the struct tags.
+    - tenv             # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17
+    - thelper          # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
+    - typecheck        # Like the front-end of a Go compiler, parses and type-checks Go code
+    - unconvert        # Remove unnecessary type conversions
+    - unparam          # Reports unused function parameters
+    - unused           # Checks Go code for unused constants, variables, functions and types
+    - varnamelen       # checks that the length of a variable's name matches its scope
+    - wastedassign     # wastedassign finds wasted assignment statements
+    - whitespace       # Tool for detection of leading and trailing whitespace
+  disable:
+    - depguard         # Go linter that checks if package imports are in a list of acceptable packages
+    - gochecknoinits   # Checks that no init functions are present in Go code
+    - gomodguard       # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations.
+    - interfacebloat   # A linter that checks length of interface.
+    - ireturn          # Accept Interfaces, Return Concrete Types
+    - mnd              # An analyzer to detect magic numbers
+    - nolintlint       # Reports ill-formed or insufficient nolint directives
+    - paralleltest     # paralleltest detects missing usage of t.Parallel() method in your Go test
+    - prealloc         # Finds slice declarations that could potentially be preallocated
+    - promlinter       # Check Prometheus metrics naming via promlint
+    - rowserrcheck     # checks whether Err of rows is checked successfully
+    - sqlclosecheck    # Checks that sql.Rows and sql.Stmt are closed.
+    - testpackage      # linter that makes you use a separate _test package
+    - tparallel        # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
+    - wrapcheck        # Checks that errors returned from external packages are wrapped
+    - wsl              # Whitespace Linter - Forces you to use empty lines!
 
 issues:
   exclude-use-default: false
-  max-per-linter: 0
-  max-same-issues: 50
+  exclude-dirs-use-default: false
+  exclude-rules:
+    # Allow complex tests and examples, better to be self contained
+    - path: (examples|main\.go|_test\.go)
+      linters:
+        - forbidigo
+        - gocognit
+
+    # Allow forbidden identifiers in CLI commands
+    - path: cmd
+      linters:
+        - forbidigo
diff --git a/vendor/github.com/pion/logging/.goreleaser.yml b/vendor/github.com/pion/logging/.goreleaser.yml
new file mode 100644
index 0000000000000000000000000000000000000000..30093e9d6dbf3792e5b2dea5a2a585407fd96087
--- /dev/null
+++ b/vendor/github.com/pion/logging/.goreleaser.yml
@@ -0,0 +1,5 @@
+# SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
+# SPDX-License-Identifier: MIT
+
+builds:
+- skip: true
diff --git a/vendor/github.com/pion/logging/.travis.yml b/vendor/github.com/pion/logging/.travis.yml
deleted file mode 100644
index b96a1edb9afeaaaee48396c74fc3928fa3ad08d7..0000000000000000000000000000000000000000
--- a/vendor/github.com/pion/logging/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-language: go
-
-go:
-  - "1.x" # use the latest Go release
-
-env:
-  - GO111MODULE=on
-
-before_script:
-  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.15.0
-
-script:
-  - golangci-lint run ./...
-#  - rm -rf examples # Remove examples, no test coverage for them
-  - go test -coverpkg=$(go list ./... | tr '\n' ',') -coverprofile=cover.out -v -race -covermode=atomic ./...
-  - bash <(curl -s https://codecov.io/bash)
-  - bash .github/assert-contributors.sh
-  - bash .github/lint-disallowed-functions-in-library.sh
-  - bash .github/lint-commit-message.sh
diff --git a/vendor/github.com/pion/logging/LICENSE b/vendor/github.com/pion/logging/LICENSE
index ab602974d200aa6849e6ad8220951ef9a78d9f08..491caf6b0f171ffca5b1510b08998d03d015bc7d 100644
--- a/vendor/github.com/pion/logging/LICENSE
+++ b/vendor/github.com/pion/logging/LICENSE
@@ -1,21 +1,9 @@
 MIT License
 
-Copyright (c) 2018 
+Copyright (c) 2023 The Pion community <https://pion.ly>
 
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/pion/logging/README.md b/vendor/github.com/pion/logging/README.md
index c15471d618b32fcc060d5d280010cdf72d0db5ea..b9824abb6fdf9298bdaf411a74f3fb6fdce8b841 100644
--- a/vendor/github.com/pion/logging/README.md
+++ b/vendor/github.com/pion/logging/README.md
@@ -8,8 +8,8 @@
   <a href="https://pion.ly"><img src="https://img.shields.io/badge/pion-logging-gray.svg?longCache=true&colorB=brightgreen" alt="Pion transport"></a>
   <a href="http://gophers.slack.com/messages/pion"><img src="https://img.shields.io/badge/join-us%20on%20slack-gray.svg?longCache=true&logo=slack&colorB=brightgreen" alt="Slack Widget"></a>
   <br>
-  <a href="https://travis-ci.org/pion/logging"><img src="https://travis-ci.org/pion/logging.svg?branch=master" alt="Build Status"></a>
-  <a href="https://godoc.org/github.com/pion/logging"><img src="https://godoc.org/github.com/pion/logging?status.svg" alt="GoDoc"></a>
+  <img alt="GitHub Workflow Status" src="https://img.shields.io/github/actions/workflow/status/pion/logging/test.yaml">
+  <a href="https://pkg.go.dev/github.com/pion/logging"><img src="https://pkg.go.dev/badge/github.com/pion/logging.svg" alt="Go Reference"></a>
   <a href="https://codecov.io/gh/pion/logging"><img src="https://codecov.io/gh/pion/logging/branch/master/graph/badge.svg" alt="Coverage Status"></a>
   <a href="https://goreportcard.com/report/github.com/pion/logging"><img src="https://goreportcard.com/badge/github.com/pion/logging" alt="Go Report Card"></a>
   <a href="LICENSE"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
@@ -20,22 +20,15 @@
 The library is used as a part of our WebRTC implementation. Please refer to that [roadmap](https://github.com/pion/webrtc/issues/9) to track our major milestones.
 
 ### Community
-Pion has an active community on the [Golang Slack](https://invite.slack.golangbridge.org/). Sign up and join the **#pion** channel for discussions and support. You can also use [Pion mailing list](https://groups.google.com/forum/#!forum/pion).
+Pion has an active community on the [Slack](https://pion.ly/slack).
 
-We are always looking to support **your projects**. Please reach out if you have something to build!
+Follow the [Pion Twitter](https://twitter.com/_pion) for project updates and important WebRTC news.
 
+We are always looking to support **your projects**. Please reach out if you have something to build!
 If you need commercial support or don't want to use public methods you can contact us at [team@pion.ly](mailto:team@pion.ly)
 
 ### Contributing
-Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible:
-
-* [John Bradley](https://github.com/kc5nra) - *Original Author*
-* [Sean DuBois](https://github.com/Sean-Der) - *Original Author*
-* [Michael MacDonald](https://github.com/mjmac) - *Original Author*
-* [Woodrow Douglass](https://github.com/wdouglass) - *Test coverage*
-* [Michiel De Backker](https://github.com/backkem) - *Docs*
-* [Hugo Arregui](https://github.com/hugoArregui) - *Custom Logs*
-* [Justin Okamoto](https://github.com/justinokamoto) - *Disabled Logs Update*
+Check out the [contributing wiki](https://github.com/pion/webrtc/wiki/Contributing) to join the group of amazing people making this project possible
 
 ### License
 MIT License - see [LICENSE](LICENSE) for full text
diff --git a/vendor/github.com/pion/logging/codecov.yml b/vendor/github.com/pion/logging/codecov.yml
new file mode 100644
index 0000000000000000000000000000000000000000..263e4d45c60b042379470917d60ac5c8145d1b91
--- /dev/null
+++ b/vendor/github.com/pion/logging/codecov.yml
@@ -0,0 +1,22 @@
+#
+# DO NOT EDIT THIS FILE
+#
+# It is automatically copied from https://github.com/pion/.goassets repository.
+#
+# SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
+# SPDX-License-Identifier: MIT
+
+coverage:
+  status:
+    project:
+      default:
+        # Allow decreasing 2% of total coverage to avoid noise.
+        threshold: 2%
+    patch:
+      default:
+        target: 70%
+        only_pulls: true
+
+ignore:
+  - "examples/*"
+  - "examples/**/*"
diff --git a/vendor/github.com/pion/logging/logger.go b/vendor/github.com/pion/logging/logger.go
index 35f650581916b8e92a6fe59593e682572090ae4f..eb1e56af618a87b9dceae2335257933d3eddf81e 100644
--- a/vendor/github.com/pion/logging/logger.go
+++ b/vendor/github.com/pion/logging/logger.go
@@ -1,3 +1,7 @@
+// SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
+// SPDX-License-Identifier: MIT
+
+// Package logging provides the logging library used by Pion
 package logging
 
 import (
@@ -9,8 +13,8 @@ import (
 	"sync"
 )
 
-// Use this abstraction to ensure thread-safe access to the logger's io.Writer
-// (which could change at runtime)
+// Use this abstraction to ensure thread-safe access to the logger's io.Writer
+// (which could change at runtime).
 type loggerWriter struct {
 	sync.RWMutex
 	output io.Writer
@@ -25,11 +29,12 @@ func (lw *loggerWriter) SetOutput(output io.Writer) {
 func (lw *loggerWriter) Write(data []byte) (int, error) {
 	lw.RLock()
 	defer lw.RUnlock()
+
 	return lw.output.Write(data)
 }
 
-// DefaultLeveledLogger encapsulates functionality for providing logging at
-// user-defined levels
+// DefaultLeveledLogger encapsulates functionality for providing logging at
+// user-defined levels.
 type DefaultLeveledLogger struct {
 	level  LogLevel
 	writer *loggerWriter
@@ -41,44 +46,50 @@ type DefaultLeveledLogger struct {
 }
 
 // WithTraceLogger is a chainable configuration function which sets the
-// Trace-level logger
+// Trace-level logger.
 func (ll *DefaultLeveledLogger) WithTraceLogger(log *log.Logger) *DefaultLeveledLogger {
 	ll.trace = log
+
 	return ll
 }
 
 // WithDebugLogger is a chainable configuration function which sets the
-// Debug-level logger
+// Debug-level logger.
 func (ll *DefaultLeveledLogger) WithDebugLogger(log *log.Logger) *DefaultLeveledLogger {
 	ll.debug = log
+
 	return ll
 }
 
 // WithInfoLogger is a chainable configuration function which sets the
-// Info-level logger
+// Info-level logger.
 func (ll *DefaultLeveledLogger) WithInfoLogger(log *log.Logger) *DefaultLeveledLogger {
 	ll.info = log
+
 	return ll
 }
 
 // WithWarnLogger is a chainable configuration function which sets the
-// Warn-level logger
+// Warn-level logger.
 func (ll *DefaultLeveledLogger) WithWarnLogger(log *log.Logger) *DefaultLeveledLogger {
 	ll.warn = log
+
 	return ll
 }
 
 // WithErrorLogger is a chainable configuration function which sets the
-// Error-level logger
+// Error-level logger.
 func (ll *DefaultLeveledLogger) WithErrorLogger(log *log.Logger) *DefaultLeveledLogger {
 	ll.err = log
+
 	return ll
 }
 
 // WithOutput is a chainable configuration function which sets the logger's
-// logging output to the supplied io.Writer
+// logging output to the supplied io.Writer.
 func (ll *DefaultLeveledLogger) WithOutput(output io.Writer) *DefaultLeveledLogger {
 	ll.writer.SetOutput(output)
+
 	return ll
 }
 
@@ -94,70 +105,71 @@ func (ll *DefaultLeveledLogger) logf(logger *log.Logger, level LogLevel, format
 	}
 }
 
-// SetLevel sets the logger's logging level
+// SetLevel sets the logger's logging level.
 func (ll *DefaultLeveledLogger) SetLevel(newLevel LogLevel) {
 	ll.level.Set(newLevel)
 }
 
-// Trace emits the preformatted message if the logger is at or below LogLevelTrace
+// Trace emits the preformatted message if the logger is at or below LogLevelTrace.
 func (ll *DefaultLeveledLogger) Trace(msg string) {
-	ll.logf(ll.trace, LogLevelTrace, msg)
+	ll.logf(ll.trace, LogLevelTrace, msg) // nolint: govet
 }
 
-// Tracef formats and emits a message if the logger is at or below LogLevelTrace
+// Tracef formats and emits a message if the logger is at or below LogLevelTrace.
 func (ll *DefaultLeveledLogger) Tracef(format string, args ...interface{}) {
 	ll.logf(ll.trace, LogLevelTrace, format, args...)
 }
 
-// Debug emits the preformatted message if the logger is at or below LogLevelDebug
+// Debug emits the preformatted message if the logger is at or below LogLevelDebug.
 func (ll *DefaultLeveledLogger) Debug(msg string) {
-	ll.logf(ll.debug, LogLevelDebug, msg)
+	ll.logf(ll.debug, LogLevelDebug, msg) // nolint: govet
 }
 
-// Debugf formats and emits a message if the logger is at or below LogLevelDebug
+// Debugf formats and emits a message if the logger is at or below LogLevelDebug.
 func (ll *DefaultLeveledLogger) Debugf(format string, args ...interface{}) {
 	ll.logf(ll.debug, LogLevelDebug, format, args...)
 }
 
-// Info emits the preformatted message if the logger is at or below LogLevelInfo
+// Info emits the preformatted message if the logger is at or below LogLevelInfo.
 func (ll *DefaultLeveledLogger) Info(msg string) {
-	ll.logf(ll.info, LogLevelInfo, msg)
+	ll.logf(ll.info, LogLevelInfo, msg) // nolint: govet
 }
 
-// Infof formats and emits a message if the logger is at or below LogLevelInfo
+// Infof formats and emits a message if the logger is at or below LogLevelInfo.
 func (ll *DefaultLeveledLogger) Infof(format string, args ...interface{}) {
 	ll.logf(ll.info, LogLevelInfo, format, args...)
 }
 
-// Warn emits the preformatted message if the logger is at or below LogLevelWarn
+// Warn emits the preformatted message if the logger is at or below LogLevelWarn.
 func (ll *DefaultLeveledLogger) Warn(msg string) {
-	ll.logf(ll.warn, LogLevelWarn, msg)
+	ll.logf(ll.warn, LogLevelWarn, msg) // nolint: govet
 }
 
-// Warnf formats and emits a message if the logger is at or below LogLevelWarn
+// Warnf formats and emits a message if the logger is at or below LogLevelWarn.
 func (ll *DefaultLeveledLogger) Warnf(format string, args ...interface{}) {
 	ll.logf(ll.warn, LogLevelWarn, format, args...)
 }
 
-// Error emits the preformatted message if the logger is at or below LogLevelError
+// Error emits the preformatted message if the logger is at or below LogLevelError.
 func (ll *DefaultLeveledLogger) Error(msg string) {
-	ll.logf(ll.err, LogLevelError, msg)
+	ll.logf(ll.err, LogLevelError, msg) // nolint: govet
 }
 
-// Errorf formats and emits a message if the logger is at or below LogLevelError
+// Errorf formats and emits a message if the logger is at or below LogLevelError.
 func (ll *DefaultLeveledLogger) Errorf(format string, args ...interface{}) {
 	ll.logf(ll.err, LogLevelError, format, args...)
 }
 
-// NewDefaultLeveledLoggerForScope returns a configured LeveledLogger
+// NewDefaultLeveledLoggerForScope returns a configured LeveledLogger.
 func NewDefaultLeveledLoggerForScope(scope string, level LogLevel, writer io.Writer) *DefaultLeveledLogger {
 	if writer == nil {
-		writer = os.Stdout
+		writer = os.Stderr
 	}
 	logger := &DefaultLeveledLogger{
 		writer: &loggerWriter{output: writer},
 		level:  level,
 	}
+
 	return logger.
 		WithTraceLogger(log.New(logger.writer, fmt.Sprintf("%s TRACE: ", scope), log.Lmicroseconds|log.Lshortfile)).
 		WithDebugLogger(log.New(logger.writer, fmt.Sprintf("%s DEBUG: ", scope), log.Lmicroseconds|log.Lshortfile)).
@@ -166,19 +178,19 @@ func NewDefaultLeveledLoggerForScope(scope string, level LogLevel, writer io.Wri
 		WithErrorLogger(log.New(logger.writer, fmt.Sprintf("%s ERROR: ", scope), log.LstdFlags))
 }
 
-// DefaultLoggerFactory define levels by scopes and creates new DefaultLeveledLogger
+// DefaultLoggerFactory define levels by scopes and creates new DefaultLeveledLogger.
 type DefaultLoggerFactory struct {
 	Writer          io.Writer
 	DefaultLogLevel LogLevel
 	ScopeLevels     map[string]LogLevel
 }
 
-// NewDefaultLoggerFactory creates a new DefaultLoggerFactory
+// NewDefaultLoggerFactory creates a new DefaultLoggerFactory.
 func NewDefaultLoggerFactory() *DefaultLoggerFactory {
 	factory := DefaultLoggerFactory{}
 	factory.DefaultLogLevel = LogLevelError
 	factory.ScopeLevels = make(map[string]LogLevel)
-	factory.Writer = os.Stdout
+	factory.Writer = os.Stderr
 
 	logLevels := map[string]LogLevel{
 		"DISABLE": LogLevelDisabled,
@@ -201,7 +213,10 @@ func NewDefaultLoggerFactory() *DefaultLoggerFactory {
 		}
 
 		if strings.ToLower(env) == "all" {
-			factory.DefaultLogLevel = level
+			if factory.DefaultLogLevel < level {
+				factory.DefaultLogLevel = level
+			}
+
 			continue
 		}
 
@@ -214,7 +229,7 @@ func NewDefaultLoggerFactory() *DefaultLoggerFactory {
 	return &factory
 }
 
-// NewLogger returns a configured LeveledLogger for the given , argsscope
+// NewLogger returns a configured LeveledLogger for the given scope.
 func (f *DefaultLoggerFactory) NewLogger(scope string) LeveledLogger {
 	logLevel := f.DefaultLogLevel
 	if f.ScopeLevels != nil {
@@ -224,5 +239,6 @@ func (f *DefaultLoggerFactory) NewLogger(scope string) LeveledLogger {
 			logLevel = scopeLevel
 		}
 	}
+
 	return NewDefaultLeveledLoggerForScope(scope, logLevel, f.Writer)
 }
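
Note the default writer moving from os.Stdout to os.Stderr in both constructors. A minimal usage sketch against the public API:

```go
package main

import (
	"os"

	"github.com/pion/logging"
)

func main() {
	// Factory-created loggers now default to stderr.
	factory := logging.NewDefaultLoggerFactory()
	factory.DefaultLogLevel = logging.LogLevelInfo

	logger := factory.NewLogger("app")
	logger.Info("starting up")
	logger.Tracef("suppressed at %s level", "info")

	// Or build a single scoped logger directly.
	direct := logging.NewDefaultLeveledLoggerForScope("db", logging.LogLevelDebug, os.Stderr)
	direct.Debug("connected")
}
```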
diff --git a/vendor/github.com/pion/logging/renovate.json b/vendor/github.com/pion/logging/renovate.json
new file mode 100644
index 0000000000000000000000000000000000000000..f1bb98c6ad0944c435c7ec6fc3c59a95348e98e8
--- /dev/null
+++ b/vendor/github.com/pion/logging/renovate.json
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "github>pion/renovate-config"
+  ]
+}
diff --git a/vendor/github.com/pion/logging/scoped.go b/vendor/github.com/pion/logging/scoped.go
index 678bab4265a7e08286bc9fbbff31e4b8afd3afb8..7b3a550ee5a28f5ee05b5ce0a56f3af4fce814e1 100644
--- a/vendor/github.com/pion/logging/scoped.go
+++ b/vendor/github.com/pion/logging/scoped.go
@@ -1,18 +1,21 @@
+// SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
+// SPDX-License-Identifier: MIT
+
 package logging
 
 import (
 	"sync/atomic"
 )
 
-// LogLevel represents the level at which the logger will emit log messages
+// LogLevel represents the level at which the logger will emit log messages.
 type LogLevel int32
 
-// Set updates the LogLevel to the supplied value
+// Set updates the LogLevel to the supplied value.
 func (ll *LogLevel) Set(newLevel LogLevel) {
 	atomic.StoreInt32((*int32)(ll), int32(newLevel))
 }
 
-// Get retrieves the current LogLevel value
+// Get retrieves the current LogLevel value.
 func (ll *LogLevel) Get() LogLevel {
 	return LogLevel(atomic.LoadInt32((*int32)(ll)))
 }
@@ -37,22 +40,22 @@ func (ll LogLevel) String() string {
 }
 
 const (
-	// LogLevelDisabled completely disables logging of any events
+	// LogLevelDisabled completely disables logging of any events.
 	LogLevelDisabled LogLevel = iota
 	// LogLevelError is for fatal errors which should be handled by user code,
-	// but are logged to ensure that they are seen
+	// but are logged to ensure that they are seen.
 	LogLevelError
-	// LogLevelWarn is for logging abnormal, but non-fatal library operation
+	// LogLevelWarn is for logging abnormal, but non-fatal library operation.
 	LogLevelWarn
-	// LogLevelInfo is for logging normal library operation (e.g. state transitions, etc.)
+	// LogLevelInfo is for logging normal library operation (e.g. state transitions, etc.).
 	LogLevelInfo
-	// LogLevelDebug is for logging low-level library information (e.g. internal operations)
+	// LogLevelDebug is for logging low-level library information (e.g. internal operations).
 	LogLevelDebug
-	// LogLevelTrace is for logging very low-level library information (e.g. network traces)
+	// LogLevelTrace is for logging very low-level library information (e.g. network traces).
 	LogLevelTrace
 )
 
-// LeveledLogger is the basic pion Logger interface
+// LeveledLogger is the basic pion Logger interface.
 type LeveledLogger interface {
 	Trace(msg string)
 	Tracef(format string, args ...interface{})
@@ -66,7 +69,7 @@ type LeveledLogger interface {
 	Errorf(format string, args ...interface{})
 }
 
-// LoggerFactory is the basic pion LoggerFactory interface
+// LoggerFactory is the basic pion LoggerFactory interface.
 type LoggerFactory interface {
 	NewLogger(scope string) LeveledLogger
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
new file mode 100644
index 0000000000000000000000000000000000000000..9a71a15db1d6c12df36c7dd6f184fe0b356a1f8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// CollectorFunc is a convenient way to implement a Prometheus Collector
+// without interface boilerplate.
+// This implementation is based on the DescribeByCollect method;
+// familiarize yourself with it before use.
+type CollectorFunc func(chan<- Metric)
+
+// Collect calls the defined CollectorFunc function with the provided Metrics channel
+func (f CollectorFunc) Collect(ch chan<- Metric) {
+	f(ch)
+}
+
+// Describe sends the descriptor information using DescribeByCollect
+func (f CollectorFunc) Describe(ch chan<- *Desc) {
+	DescribeByCollect(f, ch)
+}
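CollectorFunc makes one-off collectors cheap to define; a minimal sketch registering a gauge computed at scrape time (metric name and value are placeholders):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	desc := prometheus.NewDesc("queue_depth", "Current depth of the work queue.", nil, nil)

	// No struct needed: Describe is derived from Collect via DescribeByCollect.
	prometheus.MustRegister(prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42)
	}))

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":2112", nil)
}
```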
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index 28eed26727acb05720ec44c8c65f6a01ca8f949a..763d99e3623f8009e3e288502935d1775e6e04a9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -41,11 +41,11 @@ import (
 	"sync"
 	"time"
 
-	"github.com/klauspost/compress/zstd"
 	"github.com/prometheus/common/expfmt"
 
 	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp/internal"
 )
 
 const (
@@ -65,7 +65,13 @@ const (
 	Zstd     Compression = "zstd"
 )
 
-var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+func defaultCompressionFormats() []Compression {
+	if internal.NewZstdWriter != nil {
+		return []Compression{Identity, Gzip, Zstd}
+	} else {
+		return []Compression{Identity, Gzip}
+	}
+}
 
 var gzipPool = sync.Pool{
 	New: func() interface{} {
@@ -138,7 +144,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 	// Select compression formats to offer based on default or user choice.
 	var compressions []string
 	if !opts.DisableCompression {
-		offers := defaultCompressionFormats
+		offers := defaultCompressionFormats()
 		if len(opts.OfferedCompressions) > 0 {
 			offers = opts.OfferedCompressions
 		}
@@ -466,14 +472,12 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin
 
 	switch selected {
 	case "zstd":
-		// TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
-		z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
-		if err != nil {
-			return nil, "", func() {}, err
+		if internal.NewZstdWriter == nil {
+			// The content encoding was not implemented yet.
+			return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
 		}
-
-		z.Reset(rw)
-		return z, selected, func() { _ = z.Close() }, nil
+		writer, closeWriter, err := internal.NewZstdWriter(rw)
+		return writer, selected, closeWriter, err
 	case "gzip":
 		gz := gzipPool.Get().(*gzip.Writer)
 		gz.Reset(rw)
@@ -483,6 +487,6 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin
 		return rw, selected, func() {}, nil
 	default:
 		// The content encoding was not implemented yet.
-		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
 	}
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5039590f7723f99796698e1db63c76ce479c791
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go
@@ -0,0 +1,21 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"io"
+)
+
+// NewZstdWriter enables zstd write support if non-nil.
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error)
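This hook is how promhttp sheds its unconditional klauspost/compress dependency: zstd stays off unless something inside the module assigns the variable. The package is internal, so applications cannot set it directly, but the underlying technique, a nil-able function variable guarding an optional heavy dependency, is general. A generic sketch of the idea (all names hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// NewFancyWriter stays nil unless an optional package wires in an
// implementation, e.g. from an init() in a build-tagged file.
var NewFancyWriter func(w io.Writer) (io.Writer, func(), error)

func compress(w io.Writer) (io.Writer, func(), error) {
	if NewFancyWriter == nil {
		// Fall back to identity when the optional feature is not linked in.
		return w, func() {}, nil
	}
	return NewFancyWriter(w)
}

func main() {
	var sb strings.Builder
	w, closeFn, err := compress(&sb)
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, "payload")
	closeFn()
	fmt.Println(sb.String())
}
```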
diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go
index 7276742ec906fb7441bea0401a40157a5f4be3d1..9beaae26c2036010f09c83f6b0129e48e3a982d9 100644
--- a/vendor/github.com/prometheus/common/config/headers.go
+++ b/vendor/github.com/prometheus/common/config/headers.go
@@ -24,9 +24,9 @@ import (
 	"strings"
 )
 
-// reservedHeaders that change the connection, are set by Prometheus, or can
+// ReservedHeaders that change the connection, are set by Prometheus, or can
 // be changed otherwise.
-var reservedHeaders = map[string]struct{}{
+var ReservedHeaders = map[string]struct{}{
 	"Authorization":                       {},
 	"Host":                                {},
 	"Content-Encoding":                    {},
@@ -72,7 +72,7 @@ func (h *Headers) SetDirectory(dir string) {
 // Validate validates the Headers config.
 func (h *Headers) Validate() error {
 	for n := range h.Headers {
-		if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok {
+		if _, ok := ReservedHeaders[http.CanonicalHeaderKey(n)]; ok {
 			return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n))
 		}
 	}
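
With ReservedHeaders exported, callers can run the same pre-flight check Validate applies; a short sketch:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/config"
)

func main() {
	for _, h := range []string{"authorization", "X-Custom-Header"} {
		if _, reserved := config.ReservedHeaders[http.CanonicalHeaderKey(h)]; reserved {
			fmt.Printf("%s is reserved and cannot be set via config\n", h)
			continue
		}
		fmt.Printf("%s is allowed\n", h)
	}
}
```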
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index bd3a39e3e14715599ed36322d242996cbddaf9ac..460f554f2945daf09ffb99bd4c4961fc84d35d68 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
 	return a.ResolvedAt(time.Now())
 }
 
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
 // the given timestamp.
 func (a *Alert) ResolvedAt(ts time.Time) bool {
 	if a.EndsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 73b7aa3e60bdd741c88b8adf0b3a67417bc3c11d..f4a387605f1a22a54a7764d829de6c2964630dab 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@ import (
 )
 
 const (
-	// AlertNameLabel is the name of the label containing the an alert's name.
+	// AlertNameLabel is the name of the label containing the alert's name.
 	AlertNameLabel = "alertname"
 
 	// ExportedLabelPrefix is the prefix to prepend to the label names present in
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 5766107cf95f08de910669a39beb7033e929e611..a6b01755bd4c8f2a43526555739b95556b393f90 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -27,13 +27,25 @@ import (
 )
 
 var (
-	// NameValidationScheme determines the method of name validation to be used by
-	// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
-	// mode in isolation from other components that don't support UTF-8 may result
-	// in bugs or other undefined behavior. This value can be set to
-	// LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
-	// avoid need for locking, this value should be set once, ideally in an
-	// init(), before multiple goroutines are started.
+	// NameValidationScheme determines the global default method of the name
+	// validation to be used by all calls to IsValidMetricName() and LabelName
+	// IsValid().
+	//
+	// Deprecated: This variable should not be used and might be removed in the
+	// far future. If you wish to stick to the legacy name validation use
+	// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+	// instead. This variable is here as an escape hatch for emergency cases,
+	// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+	// to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+	// the change. In such a case, a temporary assignment to `LegacyValidation`
+	// value in the `init()` function in your main.go or so, could be considered.
+	//
+	// Historically we opted for a global variable for feature gating different
+	// validation schemes in operations that were not otherwise easily adjustable
+	// (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+	// Labels structure or package might have been a better choice. Given the
+	// change was made and many have already upgraded to this version of common,
+	// we leave this as-is, with this warning, as a lesson for the future.
 	NameValidationScheme = UTF8Validation
 
 	// NameEscapingScheme defines the default way that names will be escaped when
@@ -50,7 +62,7 @@ var (
 type ValidationScheme int
 
 const (
-	// LegacyValidation is a setting that requirets that metric and label names
+	// LegacyValidation is a setting that requires that all metric and label names
 	// conform to the original Prometheus character requirements described by
 	// MetricNameRE and LabelNameRE.
 	LegacyValidation ValidationScheme = iota
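
A minimal sketch of the escape hatch the deprecation note above describes, assuming an application that imports github.com/prometheus/common/model and wants to temporarily delay the UTF-8 migration:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// Pin the legacy scheme once at startup, before any goroutines call the
// validation helpers, as the deprecation note suggests.
func init() {
	model.NameValidationScheme = model.LegacyValidation
}

func main() {
	// Dots are rejected by the legacy scheme but allowed under UTF8Validation.
	fmt.Println(model.IsValidMetricName("http.requests.total")) // false
	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
}
```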
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index 126df9e67acf6467c60f60ab7d2e5c5f7d28cc42..b43e09f683d6c4da2f2e7d30c04299aa39894c99 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -2,7 +2,10 @@
 linters:
   enable:
   - errcheck
+  - forbidigo
   - godot
+  - gofmt
+  - goimports
   - gosimple
   - govet
   - ineffassign
@@ -12,11 +15,17 @@ linters:
   - testifylint
   - unused
 
-linter-settings:
+linters-settings:
+  forbidigo:
+    forbid:
+      - p: ^fmt\.Print.*$
+        msg: Do not commit print statements.
   godot:
     capital: true
     exclude:
     # Ignore "See: URL"
     - 'See:'
+  goimports:
+    local-prefixes: github.com/prometheus/procfs
   misspell:
     locale: US
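
As a quick illustration of the forbidigo rule added above (this snippet is not part of the repository): code matching `^fmt\.Print.*$` would now fail lint, and an intentional use would need a nolint directive, much like the `//nolint:misspell` used in proc_io.go later in this diff.

```go
package main

import "fmt"

func main() {
	// Without the directive, forbidigo reports:
	//   Do not commit print statements.
	fmt.Println("debugging output") //nolint:forbidigo
}
```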
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 1617292350681d67c1ae4a06be68c397826df8b8..cbb5d86382aaa0d2ccb9f4535ca51aeff374730c 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v1.60.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -275,3 +275,9 @@ $(1)_precheck:
 		exit 1; \
 	fi
 endef
+
+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 1224816c2ade9c8239320aa494fe10b4956feba3..0718239cf19aad4058dfbba2d5e168d386797483 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
 The procfs library includes a set of test fixtures which include many example files from
 the `/proc` and `/sys` filesystems.  These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
 which is extracted automatically during testing.  To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
 
 ```bash
 rm -rf testdata/fixtures
 make test
 ```
 
-Next, make the required changes to the extracted files in the `fixtures` directory.  When
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory.  When
 the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
 based on the updated `fixtures` directory.  And finally, verify the changes using
 `git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index cdcc8a7ccc46342803260f65b349587a590ef991..2e53344151f54d7a9007f5aebcbfd5af5367b254 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -23,9 +23,9 @@ import (
 
 // Learned from include/uapi/linux/if_arp.h.
 const (
-	// completed entry (ha valid).
+	// Completed entry (ha valid).
 	ATFComplete = 0x02
-	// permanent entry.
+	// Permanent entry.
 	ATFPermanent = 0x04
 	// Publish entry.
 	ATFPublish = 0x08
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 4980c875bfce18709637cf488489ea14bb922c83..9bdaccc7c8a477195224e26f7da7e85eed9ee293 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -24,8 +24,14 @@ type FS struct {
 	isReal bool
 }
 
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const (
+	// DefaultMountPoint is the common mount point of the proc filesystem.
+	DefaultMountPoint = fs.DefaultProcMountPoint
+
+	// SectorSize represents the size of a sector in bytes.
+	// It is specific to Linux block I/O operations.
+	SectorSize = 512
+)
 
 // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
 // It will error if the mount point directory can't be read or is a file.
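
A small sketch of how the new SectorSize constant is meant to be used; the sectorsRead value here is hypothetical. Linux block I/O counters (for example in diskstats) report 512-byte sectors, which callers previously had to hard-code.

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// Hypothetical counter as read from a diskstats-style file.
	var sectorsRead uint64 = 123456

	// Convert sectors to bytes with the exported constant instead of a
	// hard-coded 512.
	fmt.Println(sectorsRead * procfs.SectorSize)
}
```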
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
index 134767d69ac1302da1308af26b1848dabb67d7b8..1b5bdbdf84ac211eab54e46f1373944118ccb440 100644
--- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -17,7 +17,7 @@
 package procfs
 
 // isRealProc returns true on architectures that don't have a Type argument
-// in their Statfs_t struct
-func isRealProc(mountPoint string) (bool, error) {
+// in their Statfs_t struct.
+func isRealProc(_ string) (bool, error) {
 	return true, nil
 }
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index cf2e3eaa03c6705362637abf9167acb08ef1e81d..7db863307793288c8ecd6ab8172a5a24c9bfefaa 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
 	ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
 	// Number of release reqs ignored due to in-progress store
 	ReleaseRequestsIgnoredDueToInProgressStore uint64
-	// Number of page stores cancelled due to release req
+	// Number of page stores canceled due to release req
 	PageStoresCancelledByReleaseRequests uint64
 	VmscanWaiting                        uint64
 	// Number of times async ops added to pending queues
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
 	OpsRunning uint64
 	// Number of times async ops queued for processing
 	OpsEnqueued uint64
-	// Number of async ops cancelled
+	// Number of async ops canceled
 	OpsCancelled uint64
 	// Number of async ops rejected due to object lookup/create failure
 	OpsRejected uint64
-	// Number of async ops initialised
+	// Number of async ops initialized
 	OpsInitialised uint64
 	// Number of async ops queued for deferred release
 	OpsDeferred uint64
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 3c18c7610ef5678e6529fddc15daad52df83c5a3..3a43e83915f50377dcfb4c9dd18904a27d3bd9e0 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -28,6 +28,9 @@ const (
 
 	// DefaultConfigfsMountPoint is the common mount point of the configfs.
 	DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+	// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+	DefaultSelinuxMountPoint = "/sys/fs/selinux"
 )
 
 // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 14272dc78857db3b1d5f66cf432ff89741949c98..5a7d2df06ae336fb231ab986d63beb3483fbf54c 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,6 +14,7 @@
 package util
 
 import (
+	"errors"
 	"os"
 	"strconv"
 	"strings"
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
 	}
 	return &truth
 }
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	hexString := strings.TrimSpace(string(data))
+	if !strings.HasPrefix(hexString, "0x") {
+		return 0, errors.New("invalid format: hex string does not start with '0x'")
+	}
+	return strconv.ParseUint(hexString[2:], 16, 64)
+}
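
The helper above lives in an internal package, so it cannot be imported directly; the sketch below mirrors its behavior to show the expected file format (a single 0x-prefixed value, surrounding whitespace tolerated).

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readHexFromFile mirrors internal/util.ReadHexFromFile for illustration.
func readHexFromFile(path string) (uint64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	s := strings.TrimSpace(string(data))
	if !strings.HasPrefix(s, "0x") {
		return 0, errors.New("invalid format: hex string does not start with '0x'")
	}
	return strconv.ParseUint(s[2:], 16, 64)
}

func main() {
	f, _ := os.CreateTemp("", "hex")
	defer os.Remove(f.Name())
	f.WriteString("0x1a\n")
	f.Close()

	v, err := readHexFromFile(f.Name())
	fmt.Println(v, err) // 26 <nil>
}
```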
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 75a3b6c810ffd1bb710381d9c698f8a475b4fedb..b6c8d1a5700861f2737813f1520d461d029e6620 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -45,11 +45,11 @@ const (
 	fieldTransport11TCPLen = 13
 	fieldTransport11UDPLen = 10
 
-	// kernel version >= 4.14 MaxLen
+	// Kernel version >= 4.14 MaxLen
 	// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
 	fieldTransport11RDMAMaxLen = 28
 
-	// kernel version <= 4.2 MinLen
+	// Kernel version <= 4.2 MinLen
 	// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
 	fieldTransport11RDMAMinLen = 20
 )
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 0000000000000000000000000000000000000000..f50b38e352880c1d47ddf9e92d407696ae347625
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// NetDevSNMP6 returns kernel/system statistics read from the interface files
+// within the /proc/net/dev_snmp6/ directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// NetDevSNMP6 returns kernel/system statistics read from the interface files
+// within the /proc/<PID>/net/dev_snmp6/ directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+	netDevSNMP6 := make(NetDevSNMP6)
+
+	// The net/dev_snmp6 folders contain one file per interface
+	ifaceFiles, err := os.ReadDir(dir)
+	if err != nil {
+		// On systems with IPv6 disabled this directory won't exist; the
+		// os.ErrNotExist error is returned to the caller in that case too.
+		if errors.Is(err, os.ErrNotExist) {
+			return netDevSNMP6, err
+		}
+		return netDevSNMP6, err
+	}
+
+	for _, iFaceFile := range ifaceFiles {
+		f, err := os.Open(dir + "/" + iFaceFile.Name())
+		if err != nil {
+			return netDevSNMP6, err
+		}
+		defer f.Close()
+
+		netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+		if err != nil {
+			return netDevSNMP6, err
+		}
+	}
+
+	return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+	m := make(map[string]uint64)
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		key, val := stat[0], stat[1]
+
+		// Expect the stat name to contain "6" or to be "ifIndex".
+		if strings.Contains(key, "6") || key == "ifIndex" {
+			v, err := strconv.ParseUint(val, 10, 64)
+			if err != nil {
+				return m, err
+			}
+
+			m[key] = v
+		}
+	}
+	return m, scanner.Err()
+}
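
A usage sketch for the new API; Ip6InOctets is one example counter name found in /proc/net/dev_snmp6 files, and which counters appear depends on the kernel.

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		// With IPv6 disabled, /proc/net/dev_snmp6 does not exist and the
		// os.ErrNotExist error surfaces here.
		panic(err)
	}
	for iface, counters := range stats {
		fmt.Printf("%s: Ip6InOctets=%d\n", iface, counters["Ip6InOctets"])
	}
}
```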
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index b70f1fc7a4abaf19b5feaf0f4b540c43761946ef..19e3378f72d78d7b93d02412befa89d7b5ddc797 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -25,7 +25,7 @@ import (
 )
 
 const (
-	// readLimit is used by io.LimitReader while reading the content of the
+	// Maximum size limit used by io.LimitReader while reading the content of the
 	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
 	// as each line represents a single used socket.
 	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
@@ -50,12 +50,12 @@ type (
 		// UsedSockets shows the total number of parsed lines representing the
 		// number of used sockets.
 		UsedSockets uint64
-		// Drops shows the total number of dropped packets of all UPD sockets.
+		// Drops shows the total number of dropped packets of all UDP sockets.
 		Drops *uint64
 	}
 
-	// netIPSocketLine represents the fields parsed from a single line
-	// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+	// netIPSocketLine holds the fields parsed from a single line in
+	// /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
 	// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
 	// For the proc file format details, see https://linux.die.net/man/5/proc.
 	netIPSocketLine struct {
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
index 52776295572d1ef6e581fd672f7a2a41f8589cd4..0396d72015c01a55e0139065e8f9a5cdfcab1776 100644
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -25,24 +25,28 @@ type (
 
 // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
 // read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
 func (fs FS) NetTCP() (NetTCP, error) {
 	return newNetTCP(fs.proc.Path("net/tcp"))
 }
 
 // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
 // read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
 func (fs FS) NetTCP6() (NetTCP, error) {
 	return newNetTCP(fs.proc.Path("net/tcp6"))
 }
 
 // NetTCPSummary returns already computed statistics like the total queue lengths
 // for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
 func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
 	return newNetTCPSummary(fs.proc.Path("net/tcp"))
 }
 
 // NetTCP6Summary returns already computed statistics like the total queue lengths
 // for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
 func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
 	return newNetTCPSummary(fs.proc.Path("net/tcp6"))
 }
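
The deprecated accessors keep working for now; a minimal sketch of reading the summary while a migration to netlink-based socket diagnostics is pending:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	// Deprecated in this release in favor of netlink socket diagnostics,
	// but still functional.
	summary, err := fs.NetTCPSummary()
	if err != nil {
		panic(err)
	}
	fmt.Println("sockets:", summary.UsedSockets,
		"txq:", summary.TxQueueLength, "rxq:", summary.RxQueueLength)
}
```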
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index d868cebdaae8a061a82557e477bdd2b5be49b7eb..d7e0cacb4c67012b5e910f750f3e8fd14b9057fd 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
 	return &nu, nil
 }
 
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
 	fields := strings.Fields(line)
 
 	l := len(fields)
-	if l < min {
-		return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
+	if l < minFields {
+		return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
 	}
 
 	// Field offsets are as follows:
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
 	}
 
 	// Path field is optional.
-	if l > min {
+	if l > minFields {
 		// Path occurs at either index 6 or 7 depending on whether inode is
 		// already present.
 		pathIdx := 7
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index daeed7f571af3847e5cd2e896ac2909a67416c6c..4a64347c03a25d9bae8a910e484a3cf4eada8e2e 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -24,7 +24,7 @@ import (
 )
 
 // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. Cgroup v1 has one hierarchy per available resource
 // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
 // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
 // this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index 776f34971730d5d210b567df2e46fdb3f1b12d5d..d15b66ddb64ab82dbeaa243e19c2505659f59abb 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {
 
 	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
 		"read_bytes: %d\nwrite_bytes: %d\n" +
-		"cancelled_write_bytes: %d\n"
+		"cancelled_write_bytes: %d\n" //nolint:misspell
 
 	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
 		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index 09060e820803fb1cf237be6050d966a3f667a204..9a297afcf89e6989d6ea2d237cd5172011c7d7e3 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -19,7 +19,6 @@ package procfs
 import (
 	"bufio"
 	"errors"
-	"fmt"
 	"os"
 	"regexp"
 	"strconv"
@@ -29,7 +28,7 @@ import (
 )
 
 var (
-	// match the header line before each mapped zone in `/proc/pid/smaps`.
+	// Match the header line before each mapped zone in `/proc/pid/smaps`.
 	procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
 )
 
@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
 func (s *ProcSMapsRollup) parseLine(line string) error {
 	kv := strings.SplitN(line, ":", 2)
 	if len(kv) != 2 {
-		fmt.Println(line)
 		return errors.New("invalid net/dev line, missing colon")
 	}
 
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index a055197c63e81f5b4178e54b4ed984cf14cc9d1e..dd8aa56885ec3d340b030e5957ae9d2dd573f78a 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
 			}
 		}
 	case "NSpid":
-		s.NSpids = calcNSPidsList(vString)
+		nspids, err := calcNSPidsList(vString)
+		if err != nil {
+			return err
+		}
+		s.NSpids = nspids
 	case "VmPeak":
 		s.VmPeak = vUintBytes
 	case "VmSize":
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
 	return g
 }
 
-func calcNSPidsList(nspidsString string) []uint64 {
-	s := strings.Split(nspidsString, " ")
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+	s := strings.Split(nspidsString, "\t")
 	var nspids []uint64
 
 	for _, nspid := range s {
-		nspid, _ := strconv.ParseUint(nspid, 10, 64)
-		if nspid == 0 {
-			continue
+		nspid, err := strconv.ParseUint(nspid, 10, 64)
+		if err != nil {
+			return nil, err
 		}
 		nspids = append(nspids, nspid)
 	}
 
-	return nspids
+	return nspids, nil
 }
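
For context on the split change above: in /proc/<pid>/status, the NSpid values that follow the key are tab-separated, so splitting on a space never produced the individual PIDs. A standalone sketch of the corrected behavior, with an illustrative sample value:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Value portion of an "NSpid:" line for a process in nested PID
	// namespaces: outermost PID first, innermost last, tab-separated.
	vString := "134\t5\t1"

	var nspids []uint64
	for _, field := range strings.Split(vString, "\t") {
		v, err := strconv.ParseUint(field, 10, 64)
		if err != nil {
			panic(err) // the updated code now propagates this error
		}
		nspids = append(nspids, v)
	}
	fmt.Println(nspids) // [134 5 1]
}
```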
diff --git a/vendor/github.com/prometheus/prometheus/NOTICE b/vendor/github.com/prometheus/prometheus/NOTICE
index 5e4f509896b864eadb9cb24aa9dc07bf6c9dbb70..8605c258e32df9d6673152fbcb7bf8409bfd4c49 100644
--- a/vendor/github.com/prometheus/prometheus/NOTICE
+++ b/vendor/github.com/prometheus/prometheus/NOTICE
@@ -91,8 +91,18 @@ https://github.com/dgryski/go-tsz
 Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
 See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.
 
+The Go programming language
+https://go.dev/
+Copyright (c) 2009 The Go Authors
+See https://go.dev/LICENSE for license details.
+
+The Codicon icon font from Microsoft
+https://github.com/microsoft/vscode-codicons
+Copyright (c) Microsoft Corporation and other contributors
+See https://github.com/microsoft/vscode-codicons/blob/main/LICENSE for license details.
+
 We also use code from a large number of npm packages. For details, see:
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json
 - The individual package licenses as copied from the node_modules directory can be found in
   the npm_licenses.tar.bz2 archive in release tarballs and Docker images.
diff --git a/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go
new file mode 100644
index 0000000000000000000000000000000000000000..d03940f1b29062c5387b8fb975a935d840192a43
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go
@@ -0,0 +1,67 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar
+
+import "github.com/prometheus/prometheus/model/labels"
+
+// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of
+// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128
+// UTF-8 characters."
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars
+const ExemplarMaxLabelSetLength = 128
+
+// Exemplar is additional information associated with a time series.
+type Exemplar struct {
+	Labels labels.Labels `json:"labels"`
+	Value  float64       `json:"value"`
+	Ts     int64         `json:"timestamp"`
+	HasTs  bool
+}
+
+type QueryResult struct {
+	SeriesLabels labels.Labels `json:"seriesLabels"`
+	Exemplars    []Exemplar    `json:"exemplars"`
+}
+
+// Equals returns whether the exemplar e is the same as e2. Note that if HasTs is false for
+// both exemplars then the timestamps will be ignored for the comparison. This can come up
+// when an exemplar is exported without its own timestamp, in which case the scrape timestamp
+// is assigned to the Ts field. However, we still want to treat the same exemplar, scraped without
+// an exported timestamp, as a duplicate of itself for each subsequent scrape.
+func (e Exemplar) Equals(e2 Exemplar) bool {
+	if !labels.Equal(e.Labels, e2.Labels) {
+		return false
+	}
+
+	if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts {
+		return false
+	}
+
+	return e.Value == e2.Value
+}
+
+// Compare orders exemplars first by timestamp, then by value, then by labels.
+func Compare(a, b Exemplar) int {
+	if a.Ts < b.Ts {
+		return -1
+	} else if a.Ts > b.Ts {
+		return 1
+	}
+	if a.Value < b.Value {
+		return -1
+	} else if a.Value > b.Value {
+		return 1
+	}
+	return labels.Compare(a.Labels, b.Labels)
+}
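
A sketch of the duplicate-detection semantics documented on Equals; the label names and values here are illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	a := exemplar.Exemplar{
		Labels: labels.FromStrings("trace_id", "abc123"),
		Value:  0.67,
		Ts:     1700000000000,
		HasTs:  false, // Ts holds the scrape timestamp, not an exported one.
	}
	b := a
	b.Ts = 1700000015000 // Same exemplar seen at the next scrape.

	// With HasTs false on both sides the timestamps are ignored, so the
	// re-scraped exemplar counts as a duplicate of itself.
	fmt.Println(a.Equals(b)) // true

	b.HasTs = true
	fmt.Println(a.Equals(b)) // false: timestamps must now match.
}
```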
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go
new file mode 100644
index 0000000000000000000000000000000000000000..e5519a56d65c744df4eeaefa4b3d7f9bb4b992bf
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go
@@ -0,0 +1,1360 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strings"
+)
+
+// FloatHistogram is similar to Histogram but uses float64 for all
+// counts. Additionally, bucket counts are absolute and not deltas.
+//
+// A FloatHistogram is needed by PromQL to handle operations that might result
+// in fractional counts. Since the counts in a histogram are unlikely to be too
+// large to be represented precisely by a float64, a FloatHistogram can also be
+// used to represent a histogram with integer counts and thus serves as a more
+// generalized representation.
+type FloatHistogram struct {
+	// Counter reset information.
+	CounterResetHint CounterResetHint
+	// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
+	// each case, and then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times
+	// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
+	// the CustomValues field.
+	Schema int32
+	// Width of the zero bucket.
+	ZeroThreshold float64
+	// Observations falling into the zero bucket. Must be zero or positive.
+	ZeroCount float64
+	// Total number of observations. Must be zero or positive.
+	Count float64
+	// Sum of observations. This is also used as the stale marker.
+	Sum float64
+	// Spans for positive and negative buckets (see Span below).
+	PositiveSpans, NegativeSpans []Span
+	// Observation counts in buckets. Each represents an absolute count and
+	// must be zero or positive.
+	PositiveBuckets, NegativeBuckets []float64
+	// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
+	// This slice is interned, to be treated as immutable and copied by reference.
+	// These numbers should be strictly increasing. This field is only used when the
+	// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
+	// and NegativeBuckets fields are not used in that case.
+	CustomValues []float64
+}
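
A sketch of the exponential-schema geometry described in the Schema comment; this helper is not part of the package. For schema n, positive bucket index i has upper bound 2^(i * 2^-n), so each boundary is the previous one times 2^(2^-n).

```go
package main

import (
	"fmt"
	"math"
)

// upperBound computes the upper bound of positive bucket index i for an
// exponential schema n: boundaries grow by a factor of 2^(2^-n) per bucket.
func upperBound(schema, index int32) float64 {
	return math.Exp2(float64(index) * math.Exp2(-float64(schema)))
}

func main() {
	// Schema 0 doubles each boundary: 1, 2, 4, 8, ...
	fmt.Println(upperBound(0, 1), upperBound(0, 2), upperBound(0, 3))
	// Schema 2 splits each power of two into 2^2 = 4 buckets.
	fmt.Println(upperBound(2, 1)) // 2^(1/4), roughly 1.1892
}
```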
+
+// UsesCustomBuckets returns whether the histogram uses the custom buckets schema.
+func (h *FloatHistogram) UsesCustomBuckets() bool {
+	return IsCustomBucketsSchema(h.Schema)
+}
+
+// Copy returns a deep copy of the FloatHistogram.
+func (h *FloatHistogram) Copy() *FloatHistogram {
+	c := FloatHistogram{
+		CounterResetHint: h.CounterResetHint,
+		Schema:           h.Schema,
+		Count:            h.Count,
+		Sum:              h.Sum,
+	}
+
+	if h.UsesCustomBuckets() {
+		if len(h.CustomValues) != 0 {
+			c.CustomValues = make([]float64, len(h.CustomValues))
+			copy(c.CustomValues, h.CustomValues)
+		}
+	} else {
+		c.ZeroThreshold = h.ZeroThreshold
+		c.ZeroCount = h.ZeroCount
+
+		if len(h.NegativeSpans) != 0 {
+			c.NegativeSpans = make([]Span, len(h.NegativeSpans))
+			copy(c.NegativeSpans, h.NegativeSpans)
+		}
+		if len(h.NegativeBuckets) != 0 {
+			c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
+			copy(c.NegativeBuckets, h.NegativeBuckets)
+		}
+	}
+
+	if len(h.PositiveSpans) != 0 {
+		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
+		copy(c.PositiveSpans, h.PositiveSpans)
+	}
+	if len(h.PositiveBuckets) != 0 {
+		c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
+		copy(c.PositiveBuckets, h.PositiveBuckets)
+	}
+
+	return &c
+}
+
+// CopyTo makes a deep copy into the given FloatHistogram.
+// The destination object has to be a non-nil pointer.
+func (h *FloatHistogram) CopyTo(to *FloatHistogram) {
+	to.CounterResetHint = h.CounterResetHint
+	to.Schema = h.Schema
+	to.Count = h.Count
+	to.Sum = h.Sum
+
+	if h.UsesCustomBuckets() {
+		to.ZeroThreshold = 0
+		to.ZeroCount = 0
+
+		to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
+		to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
+
+		to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
+		copy(to.CustomValues, h.CustomValues)
+	} else {
+		to.ZeroThreshold = h.ZeroThreshold
+		to.ZeroCount = h.ZeroCount
+
+		to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
+		copy(to.NegativeSpans, h.NegativeSpans)
+
+		to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
+		copy(to.NegativeBuckets, h.NegativeBuckets)
+
+		to.CustomValues = clearIfNotNil(to.CustomValues)
+	}
+
+	to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
+	copy(to.PositiveSpans, h.PositiveSpans)
+
+	to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
+	copy(to.PositiveBuckets, h.PositiveBuckets)
+}
+
+// CopyToSchema works like Copy, but the returned deep copy has the provided
+// target schema, which must be ≤ the original schema (i.e. it must have a lower
+// resolution). This method panics if a custom buckets schema is used in the
+// receiving FloatHistogram or as the provided targetSchema.
+func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
+	if targetSchema == h.Schema {
+		// Fast path.
+		return h.Copy()
+	}
+	if h.UsesCustomBuckets() {
+		panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema))
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
+	if targetSchema > h.Schema {
+		panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
+	}
+	c := FloatHistogram{
+		Schema:        targetSchema,
+		ZeroThreshold: h.ZeroThreshold,
+		ZeroCount:     h.ZeroCount,
+		Count:         h.Count,
+		Sum:           h.Sum,
+	}
+
+	c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, false)
+	c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, false)
+
+	return &c
+}
+
+// String returns a string representation of the FloatHistogram.
+func (h *FloatHistogram) String() string {
+	var sb strings.Builder
+	fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum)
+
+	var nBuckets []Bucket[float64]
+	for it := h.NegativeBucketIterator(); it.Next(); {
+		bucket := it.At()
+		if bucket.Count != 0 {
+			nBuckets = append(nBuckets, it.At())
+		}
+	}
+	for i := len(nBuckets) - 1; i >= 0; i-- {
+		fmt.Fprintf(&sb, ", %s", nBuckets[i].String())
+	}
+
+	if h.ZeroCount != 0 {
+		fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String())
+	}
+
+	for it := h.PositiveBucketIterator(); it.Next(); {
+		bucket := it.At()
+		if bucket.Count != 0 {
+			fmt.Fprintf(&sb, ", %s", bucket.String())
+		}
+	}
+
+	sb.WriteRune('}')
+	return sb.String()
+}
+
+// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing
+// framework as well as in promtool rules unit tests.
+// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
+func (h *FloatHistogram) TestExpression() string {
+	var res []string
+	m := h.Copy()
+
+	m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1.
+
+	if m.Schema != 0 {
+		res = append(res, fmt.Sprintf("schema:%d", m.Schema))
+	}
+	if m.Count != 0 {
+		res = append(res, fmt.Sprintf("count:%g", m.Count))
+	}
+	if m.Sum != 0 {
+		res = append(res, fmt.Sprintf("sum:%g", m.Sum))
+	}
+	if m.ZeroCount != 0 {
+		res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount))
+	}
+	if m.ZeroThreshold != 0 {
+		res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
+	}
+	if m.UsesCustomBuckets() {
+		res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues))
+	}
+
+	switch m.CounterResetHint {
+	case UnknownCounterReset:
+		// Unknown is the default, don't add anything.
+	case CounterReset:
+		res = append(res, "counter_reset_hint:reset")
+	case NotCounterReset:
+		res = append(res, "counter_reset_hint:not_reset")
+	case GaugeType:
+		res = append(res, "counter_reset_hint:gauge")
+	}
+
+	addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
+		if len(spans) > 1 {
+			panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind))
+		}
+		for _, span := range spans {
+			if span.Offset != 0 {
+				res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset))
+			}
+		}
+
+		var bucketStr []string
+		for _, bucket := range buckets {
+			bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket))
+		}
+		if len(bucketStr) > 0 {
+			res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " ")))
+		}
+		return res
+	}
+	res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans)
+	res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans)
+	return "{{" + strings.Join(res, " ") + "}}"
+}
+
+// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
+func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
+	if h.UsesCustomBuckets() {
+		panic("histograms with custom buckets have no zero bucket")
+	}
+	return Bucket[float64]{
+		Lower:          -h.ZeroThreshold,
+		Upper:          h.ZeroThreshold,
+		LowerInclusive: true,
+		UpperInclusive: true,
+		Count:          h.ZeroCount,
+		// Index is irrelevant for the zero bucket.
+	}
+}
+
+// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all
+// bucket counts including the zero bucket and the count and the sum of
+// observations. The bucket layout stays the same. This method changes the
+// receiving histogram directly (rather than acting on a copy). It returns a
+// pointer to the receiving histogram for convenience.
+func (h *FloatHistogram) Mul(factor float64) *FloatHistogram {
+	h.ZeroCount *= factor
+	h.Count *= factor
+	h.Sum *= factor
+	for i := range h.PositiveBuckets {
+		h.PositiveBuckets[i] *= factor
+	}
+	for i := range h.NegativeBuckets {
+		h.NegativeBuckets[i] *= factor
+	}
+	return h
+}
+
+// Div works like Mul but divides instead of multiplies.
+// When dividing by 0, everything will be set to Inf.
+func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
+	h.ZeroCount /= scalar
+	h.Count /= scalar
+	h.Sum /= scalar
+	// Division by zero removes all buckets.
+	if scalar == 0 {
+		h.PositiveBuckets = nil
+		h.NegativeBuckets = nil
+		h.PositiveSpans = nil
+		h.NegativeSpans = nil
+		return h
+	}
+	for i := range h.PositiveBuckets {
+		h.PositiveBuckets[i] /= scalar
+	}
+	for i := range h.NegativeBuckets {
+		h.NegativeBuckets[i] /= scalar
+	}
+	return h
+}
+
+// Add adds the provided other histogram to the receiving histogram. Count, Sum,
+// and buckets from the other histogram are added to the corresponding
+// components of the receiving histogram. Buckets in the other histogram that do
+// not exist in the receiving histogram are inserted into the latter. The
+// resulting histogram might have buckets with a population of zero or directly
+// adjacent spans (offset=0). To normalize those, call the Compact method.
+//
+// The method reconciles differences in the zero threshold and in the schema, and
+// changes them if needed. The other histogram will not be modified in any case.
+// Adding is currently only supported between 2 exponential histograms, or between
+// 2 custom buckets histograms with the exact same custom bounds.
+//
+// This method returns a pointer to the receiving histogram for convenience.
+func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
+	if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
+		return nil, ErrHistogramsIncompatibleSchema
+	}
+	if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
+		return nil, ErrHistogramsIncompatibleBounds
+	}
+
+	switch {
+	case other.CounterResetHint == h.CounterResetHint:
+		// Adding apples to apples, all good. No need to change anything.
+	case h.CounterResetHint == GaugeType:
+		// Adding something else to a gauge. That's probably OK. Outcome is a gauge.
+		// Nothing to do since the receiver is already marked as gauge.
+	case other.CounterResetHint == GaugeType:
+		// Similar to before, but this time the receiver is "something else" and we have to change it to gauge.
+		h.CounterResetHint = GaugeType
+	case h.CounterResetHint == UnknownCounterReset:
+		// With the receiver's CounterResetHint being "unknown", this could still be legitimate
+		// if the caller knows what they are doing. Outcome is then again "unknown".
+		// No need to do anything since the receiver's CounterResetHint is already "unknown".
+	case other.CounterResetHint == UnknownCounterReset:
+		// Similar to before, but now we have to set the receiver's CounterResetHint to "unknown".
+		h.CounterResetHint = UnknownCounterReset
+	default:
+		// All other cases shouldn't actually happen.
+		// They are a direct collision of CounterReset and NotCounterReset.
+		// Conservatively set the CounterResetHint to "unknown" and issue a warning.
+		h.CounterResetHint = UnknownCounterReset
+		// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
+	}
+
+	if !h.UsesCustomBuckets() {
+		otherZeroCount := h.reconcileZeroBuckets(other)
+		h.ZeroCount += otherZeroCount
+	}
+	h.Count += other.Count
+	h.Sum += other.Sum
+
+	var (
+		hPositiveSpans       = h.PositiveSpans
+		hPositiveBuckets     = h.PositiveBuckets
+		otherPositiveSpans   = other.PositiveSpans
+		otherPositiveBuckets = other.PositiveBuckets
+	)
+
+	if h.UsesCustomBuckets() {
+		h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+		return h, nil
+	}
+
+	var (
+		hNegativeSpans       = h.NegativeSpans
+		hNegativeBuckets     = h.NegativeBuckets
+		otherNegativeSpans   = other.NegativeSpans
+		otherNegativeBuckets = other.NegativeBuckets
+	)
+
+	switch {
+	case other.Schema < h.Schema:
+		hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true)
+		hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true)
+		h.Schema = other.Schema
+
+	case other.Schema > h.Schema:
+		otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false)
+		otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false)
+	}
+
+	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
+
+	return h, nil
+}
+
+// Sub works like Add but subtracts the other histogram.
+func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
+	if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
+		return nil, ErrHistogramsIncompatibleSchema
+	}
+	if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
+		return nil, ErrHistogramsIncompatibleBounds
+	}
+
+	if !h.UsesCustomBuckets() {
+		otherZeroCount := h.reconcileZeroBuckets(other)
+		h.ZeroCount -= otherZeroCount
+	}
+	h.Count -= other.Count
+	h.Sum -= other.Sum
+
+	var (
+		hPositiveSpans       = h.PositiveSpans
+		hPositiveBuckets     = h.PositiveBuckets
+		otherPositiveSpans   = other.PositiveSpans
+		otherPositiveBuckets = other.PositiveBuckets
+	)
+
+	if h.UsesCustomBuckets() {
+		h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+		return h, nil
+	}
+
+	var (
+		hNegativeSpans       = h.NegativeSpans
+		hNegativeBuckets     = h.NegativeBuckets
+		otherNegativeSpans   = other.NegativeSpans
+		otherNegativeBuckets = other.NegativeBuckets
+	)
+
+	switch {
+	case other.Schema < h.Schema:
+		hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true)
+		hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true)
+		h.Schema = other.Schema
+	case other.Schema > h.Schema:
+		otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false)
+		otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false)
+	}
+
+	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
+
+	return h, nil
+}
+
+// Equals returns true if the given float histogram matches exactly.
+// Exact match is when there are no new buckets (even empty) and no missing buckets,
+// and all the bucket values match. Span layouts may differ in the empty spans in between,
+// but they must represent the same bucket layout to match.
+// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
+// because this method is about data equality rather than mathematical equality.
+// We ignore fields that are not used based on the exponential / custom buckets schema,
+// but check fields where differences may cause unintended behaviour even if they are not
+// supposed to be used according to the schema.
+func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
+	if h2 == nil {
+		return false
+	}
+
+	if h.Schema != h2.Schema ||
+		math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
+		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
+		return false
+	}
+
+	if h.UsesCustomBuckets() {
+		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
+			return false
+		}
+	}
+
+	if h.ZeroThreshold != h2.ZeroThreshold ||
+		math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) {
+		return false
+	}
+
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+	if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
+
+	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
+		return false
+	}
+	if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
+		return false
+	}
+
+	return true
+}
+
+// Size returns the total size of the FloatHistogram, which includes the size of the pointer
+// to FloatHistogram, all its fields, and all elements contained in slices.
+// NOTE: this is only valid for 64-bit architectures.
+func (h *FloatHistogram) Size() int {
+	// Size of each slice separately.
+	posSpanSize := len(h.PositiveSpans) * 8     // 8 bytes (int32 + uint32).
+	negSpanSize := len(h.NegativeSpans) * 8     // 8 bytes (int32 + uint32).
+	posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64).
+	negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64).
+	customBoundSize := len(h.CustomValues) * 8  // 8 bytes (float64).
+
+	// Total size of the struct.
+
+	// fh is 8 bytes.
+	// fh.CounterResetHint is 4 bytes (1 byte bool + 3 bytes padding).
+	// fh.Schema is 4 bytes.
+	// fh.ZeroThreshold is 8 bytes.
+	// fh.ZeroCount is 8 bytes.
+	// fh.Count is 8 bytes.
+	// fh.Sum is 8 bytes.
+	// fh.PositiveSpans is 24 bytes.
+	// fh.NegativeSpans is 24 bytes.
+	// fh.PositiveBuckets is 24 bytes.
+	// fh.NegativeBuckets is 24 bytes.
+	// fh.CustomValues is 24 bytes.
+	structSize := 168
+
+	return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize
+}
+
+// Compact eliminates empty buckets at the beginning and end of each span, then
+// merges spans that are consecutive or at most maxEmptyBuckets apart, and
+// finally splits spans that contain more consecutive empty buckets than
+// maxEmptyBuckets. (The actual implementation might do something more efficient
+// but with the same result.)  The compaction happens "in place" in the
+// receiving histogram, but a pointer to it is returned for convenience.
+//
+// The ideal value for maxEmptyBuckets depends on circumstances. The motivation
+// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to
+// represent very few empty buckets explicitly within one span than cutting the
+// one span into two to treat the empty buckets as a gap between the two spans,
+// both in terms of storage requirement as well as in terms of encoding and
+// decoding effort. However, the tradeoffs are subtle. For one, they are
+// different in the exposition format vs. in a TSDB chunk vs. for the in-memory
+// representation as Go types. In the TSDB, as an additional aspect, the span
+// layout is only stored once per chunk, while many histograms with that same
+// chunk layout are then only stored with their buckets (so that even a single
+// empty bucket will be stored many times).
+//
+// For the Go types, an additional Span takes 8 bytes. Similarly, an additional
+// bucket takes 8 bytes. Therefore, with a single separating empty bucket, both
+// options have the same storage requirement, but the single-span solution is
+// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0
+// and only use a larger number if you know what you are doing.
+func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
+	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
+		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
+	)
+	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
+		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
+	)
+	return h
+}
+
+// DetectReset returns true if the receiving histogram is missing any buckets
+// that have a non-zero population in the provided previous histogram. It also
+// returns true if any count (in any bucket, in the zero count, or in the count
+// of observations, but NOT the sum of observations) is smaller in the receiving
+// histogram compared to the previous histogram. Otherwise, it returns false.
+//
+// This method will shortcut to true if a CounterReset is detected, and shortcut
+// to false if NotCounterReset is detected. Otherwise it will do the work to detect
+// a reset.
+//
+// Special behavior in case the Schema or the ZeroThreshold is not the same in
+// both histograms:
+//
+//   - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an
+//     increase of resolution) can only happen together with a reset. Thus, the
+//     method returns true in either case.
+//
+//   - Upon an increase of the ZeroThreshold, the buckets in the previous
+//     histogram that fall within the new ZeroThreshold are added to the ZeroCount
+//     of the previous histogram (without mutating the provided previous
+//     histogram). The scenario that a populated bucket of the previous histogram
+//     is partially within, partially outside of the new ZeroThreshold, can only
+//     happen together with a counter reset and therefore shortcuts to returning
+//     true.
+//
+//   - Upon a decrease of the Schema, the buckets of the previous histogram are
+//     merged so that they match the new, lower-resolution schema (again without
+//     mutating the provided previous histogram).
+func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
+	if h.CounterResetHint == CounterReset {
+		return true
+	}
+	if h.CounterResetHint == NotCounterReset {
+		return false
+	}
+	// In all other cases of CounterResetHint (UnknownCounterReset and GaugeType),
+	// we go on as we would otherwise, for reasons explained below.
+	//
+	// If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes
+	// with a counter reset. Therefore, we have to do all the detailed work to find out if there
+	// is a counter reset or not.
+	// We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still
+	// allows the user to apply functions to gauge histograms that are only meant for counter histograms.
+	// In this case, we treat the gauge histograms as counter histograms. A warning should be returned
+	// to the user in this case.
+	if h.Count < previous.Count {
+		return true
+	}
+	if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) {
+		// Mark that something has changed or that the application has been restarted. However, this does
+		// not matter so much since the change in schema will be handled directly in the chunks and PromQL
+		// functions.
+		return true
+	}
+	if h.Schema > previous.Schema {
+		return true
+	}
+	if h.ZeroThreshold < previous.ZeroThreshold {
+		// ZeroThreshold decreased.
+		return true
+	}
+	previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold)
+	if newThreshold != h.ZeroThreshold {
+		// ZeroThreshold is within a populated bucket in previous
+		// histogram.
+		return true
+	}
+	if h.ZeroCount < previousZeroCount {
+		return true
+	}
+	currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
+	prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
+	if detectReset(&currIt, &prevIt) {
+		return true
+	}
+	currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
+	prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
+	return detectReset(&currIt, &prevIt)
+}
+
+func detectReset(currIt, prevIt *floatBucketIterator) bool {
+	if !prevIt.Next() {
+		return false // If no buckets in previous histogram, nothing can be reset.
+	}
+	prevBucket := prevIt.strippedAt()
+	if !currIt.Next() {
+		// No bucket in current, but at least one in previous
+		// histogram. Check if any of those are non-zero, in which case
+		// this is a reset.
+		for {
+			if prevBucket.count != 0 {
+				return true
+			}
+			if !prevIt.Next() {
+				return false
+			}
+		}
+	}
+	currBucket := currIt.strippedAt()
+	for {
+		// Forward currIt until we find the bucket corresponding to prevBucket.
+		for currBucket.index < prevBucket.index {
+			if !currIt.Next() {
+				// Reached end of currIt early, therefore
+				// previous histogram has a bucket that the
+				// current one does not have. Unless all
+				// remaining buckets in the previous histogram
+				// are unpopulated, this is a reset.
+				for {
+					if prevBucket.count != 0 {
+						return true
+					}
+					if !prevIt.Next() {
+						return false
+					}
+				}
+			}
+			currBucket = currIt.strippedAt()
+		}
+		if currBucket.index > prevBucket.index {
+			// Previous histogram has a bucket the current one does
+			// not have. If it's populated, it's a reset.
+			if prevBucket.count != 0 {
+				return true
+			}
+		} else {
+			// We have reached corresponding buckets in both iterators.
+			// We can finally compare the counts.
+			if currBucket.count < prevBucket.count {
+				return true
+			}
+		}
+		if !prevIt.Next() {
+			// Reached end of prevIt without finding offending buckets.
+			return false
+		}
+		prevBucket = prevIt.strippedAt()
+	}
+}
+
+// PositiveBucketIterator returns a BucketIterator to iterate over all positive
+// buckets in ascending order (starting next to the zero bucket and going up).
+func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] {
+	it := h.floatBucketIterator(true, 0, h.Schema)
+	return &it
+}
+
+// NegativeBucketIterator returns a BucketIterator to iterate over all negative
+// buckets in descending order (starting next to the zero bucket and going
+// down).
+func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
+	it := h.floatBucketIterator(false, 0, h.Schema)
+	return &it
+}
+
+// PositiveReverseBucketIterator returns a BucketIterator to iterate over all
+// positive buckets in descending order (starting at the highest bucket and
+// going down towards the zero bucket).
+func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
+	it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
+	return &it
+}
+
+// NegativeReverseBucketIterator returns a BucketIterator to iterate over all
+// negative buckets in ascending order (starting at the lowest bucket and going
+// up towards the zero bucket).
+func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
+	it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
+	return &it
+}
+
+// AllBucketIterator returns a BucketIterator to iterate over all negative,
+// zero, and positive buckets in ascending order (starting at the lowest bucket
+// and going up). If the highest negative bucket or the lowest positive bucket
+// overlap with the zero bucket, their upper or lower boundary, respectively, is
+// set to the zero threshold.
+func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
+	return &allFloatBucketIterator{
+		h:         h,
+		leftIter:  newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil),
+		rightIter: h.floatBucketIterator(true, 0, h.Schema),
+		state:     -1,
+	}
+}
+
+// AllReverseBucketIterator returns a BucketIterator to iterate over all negative,
+// zero, and positive buckets in descending order (starting at the highest bucket
+// and going down). If the highest negative bucket or the lowest positive bucket
+// overlap with the zero bucket, their upper or lower boundary, respectively, is
+// set to the zero threshold.
+func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
+	return &allFloatBucketIterator{
+		h:         h,
+		leftIter:  newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues),
+		rightIter: h.floatBucketIterator(false, 0, h.Schema),
+		state:     -1,
+	}
+}
+
+// Validate validates consistency between the span and bucket slices. It also checks
+// that no bucket has a negative count, and that there are no unexpected fields or field values
+// based on the exponential / custom buckets schema.
+// We do not check for h.Count being at least as large as the sum of the
+// counts in the buckets because floating point precision issues can
+// create false positives here.
+func (h *FloatHistogram) Validate() error {
+	var nCount, pCount float64
+	if h.UsesCustomBuckets() {
+		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("custom buckets: %w", err)
+		}
+		if h.ZeroCount != 0 {
+			return errors.New("custom buckets: must have zero count of 0")
+		}
+		if h.ZeroThreshold != 0 {
+			return errors.New("custom buckets: must have zero threshold of 0")
+		}
+		if len(h.NegativeSpans) > 0 {
+			return errors.New("custom buckets: must not have negative spans")
+		}
+		if len(h.NegativeBuckets) > 0 {
+			return errors.New("custom buckets: must not have negative buckets")
+		}
+	} else {
+		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("positive side: %w", err)
+		}
+		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
+		if err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		if h.CustomValues != nil {
+			return errors.New("histogram with exponential schema must not have custom bounds")
+		}
+	}
+	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
+	if err != nil {
+		return fmt.Errorf("positive side: %w", err)
+	}
+
+	return nil
+}
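+
+// A hedged sketch of Validate on a custom-buckets histogram (all values are
+// made up for illustration):
+//
+//	h := &FloatHistogram{
+//		Schema:          CustomBucketsSchema,
+//		Count:           12,
+//		Sum:             42,
+//		PositiveSpans:   []Span{{Offset: 0, Length: 3}},
+//		PositiveBuckets: []float64{3, 4, 5},
+//		CustomValues:    []float64{0.5, 1}, // bucket 2 implicitly ends at +Inf
+//	}
+//	if err := h.Validate(); err != nil {
+//		// handle the invalid histogram
+//	}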
+
+// zeroCountForLargerThreshold returns what the histogram's zero count would be
+// if the ZeroThreshold had the provided larger (or equal) value. If the
+// provided value is less than the histogram's ZeroThreshold, the method panics.
+// If the largerThreshold ends up within a populated bucket of the histogram, it
+// is adjusted upwards to the lower limit of that bucket (all in terms of
+// absolute values) and that bucket's count is included in the returned
+// count. The adjusted threshold is returned, too.
+func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) {
+	// Fast path.
+	if largerThreshold == h.ZeroThreshold {
+		return h.ZeroCount, largerThreshold
+	}
+	if largerThreshold < h.ZeroThreshold {
+		panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold))
+	}
+outer:
+	for {
+		count = h.ZeroCount
+		i := h.PositiveBucketIterator()
+		for i.Next() {
+			b := i.At()
+			if b.Lower >= largerThreshold {
+				break
+			}
+			count += b.Count // Bucket to be merged into zero bucket.
+			if b.Upper > largerThreshold {
+				// New threshold ended up within a bucket. If it's
+				// populated, we need to adjust largerThreshold before
+				// we are done here.
+				if b.Count != 0 {
+					largerThreshold = b.Upper
+				}
+				break
+			}
+		}
+		i = h.NegativeBucketIterator()
+		for i.Next() {
+			b := i.At()
+			if b.Upper <= -largerThreshold {
+				break
+			}
+			count += b.Count // Bucket to be merged into zero bucket.
+			if b.Lower < -largerThreshold {
+				// New threshold ended up within a bucket. If
+				// it's populated, we need to adjust
+				// largerThreshold and have to redo the whole
+				// thing because the treatment of the positive
+				// buckets is invalid now.
+				if b.Count != 0 {
+					largerThreshold = -b.Lower
+					continue outer
+				}
+				break
+			}
+		}
+		return count, largerThreshold
+	}
+}
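+
+// Worked example (hypothetical numbers): with ZeroThreshold=0.001, ZeroCount=2,
+// and a single populated positive bucket (1,2] holding a count of 3, calling
+// zeroCountForLargerThreshold(1.5) lands inside that bucket, so the threshold
+// is pushed up to the bucket's upper bound and the method returns count=5,
+// threshold=2.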
+
+// trimBucketsInZeroBucket removes all buckets that are within the zero
+// bucket. It assumes that the zero threshold is at a bucket boundary and that
+// the counts in the buckets to remove are already part of the zero count.
+func (h *FloatHistogram) trimBucketsInZeroBucket() {
+	i := h.PositiveBucketIterator()
+	bucketsIdx := 0
+	for i.Next() {
+		b := i.At()
+		if b.Lower >= h.ZeroThreshold {
+			break
+		}
+		h.PositiveBuckets[bucketsIdx] = 0
+		bucketsIdx++
+	}
+	i = h.NegativeBucketIterator()
+	bucketsIdx = 0
+	for i.Next() {
+		b := i.At()
+		if b.Upper <= -h.ZeroThreshold {
+			break
+		}
+		h.NegativeBuckets[bucketsIdx] = 0
+		bucketsIdx++
+	}
+	// We are abusing Compact to trim the buckets set to zero
+	// above. Premature compacting could cause additional cost, but this
+	// code path is probably rarely used anyway.
+	h.Compact(0)
+}
+
+// reconcileZeroBuckets finds a zero bucket large enough to include the zero
+// buckets of both histograms (the receiving histogram and the other histogram)
+// with a zero threshold that is not within a populated bucket in either
+// histogram. This method modifies the receiving histogram accordingly, but
+// leaves the other histogram as is. Instead, it returns the zero count the
+// other histogram would have if it were modified.
+func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
+	otherZeroCount := other.ZeroCount
+	otherZeroThreshold := other.ZeroThreshold
+
+	for otherZeroThreshold != h.ZeroThreshold {
+		if h.ZeroThreshold > otherZeroThreshold {
+			otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold)
+		}
+		if otherZeroThreshold > h.ZeroThreshold {
+			h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold)
+			h.trimBucketsInZeroBucket()
+		}
+	}
+	return otherZeroCount
+}
+
+// floatBucketIterator is a low-level constructor for bucket iterators.
+//
+// If positive is true, the returned iterator iterates through the positive
+// buckets, otherwise through the negative buckets.
+//
+// For exponential schemas only: if absoluteStartValue is below the lowest
+// absolute value of any upper bucket boundary, the iterator starts with the
+// first bucket. Otherwise, it skips all buckets whose upper boundary has an
+// absolute value ≤ absoluteStartValue. For custom bucket schemas,
+// absoluteStartValue is ignored and no buckets are skipped.
+//
+// targetSchema must be ≤ the schema of FloatHistogram (and of course within the
+// legal values for schemas in general). The buckets are merged to match the
+// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets
+// schemas cannot be merged with other schemas.
+func (h *FloatHistogram) floatBucketIterator(
+	positive bool, absoluteStartValue float64, targetSchema int32,
+) floatBucketIterator {
+	if h.UsesCustomBuckets() && targetSchema != h.Schema {
+		panic(errors.New("cannot merge from custom buckets schema to exponential schema"))
+	}
+	if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
+		panic(errors.New("cannot merge from exponential buckets schema to custom schema"))
+	}
+	if targetSchema > h.Schema {
+		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
+	}
+	i := floatBucketIterator{
+		baseBucketIterator: baseBucketIterator[float64, float64]{
+			schema:   h.Schema,
+			positive: positive,
+		},
+		targetSchema:           targetSchema,
+		absoluteStartValue:     absoluteStartValue,
+		boundReachedStartValue: absoluteStartValue == 0,
+	}
+	if positive {
+		i.spans = h.PositiveSpans
+		i.buckets = h.PositiveBuckets
+		i.customValues = h.CustomValues
+	} else {
+		i.spans = h.NegativeSpans
+		i.buckets = h.NegativeBuckets
+	}
+	return i
+}
+
+// newReverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
+func newReverseFloatBucketIterator(
+	spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
+) reverseFloatBucketIterator {
+	r := reverseFloatBucketIterator{
+		baseBucketIterator: baseBucketIterator[float64, float64]{
+			schema:       schema,
+			spans:        spans,
+			buckets:      buckets,
+			positive:     positive,
+			customValues: customValues,
+		},
+	}
+
+	r.spansIdx = len(r.spans) - 1
+	r.bucketsIdx = len(r.buckets) - 1
+	if r.spansIdx >= 0 {
+		r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1
+	}
+	r.currIdx = 0
+	for _, s := range r.spans {
+		r.currIdx += s.Offset + int32(s.Length)
+	}
+
+	return r
+}
+
+type floatBucketIterator struct {
+	baseBucketIterator[float64, float64]
+
+	targetSchema       int32   // targetSchema is the schema to merge to and must be ≤ schema.
+	origIdx            int32   // The bucket index within the original schema.
+	absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value.
+
+	boundReachedStartValue bool // Has getBound reached absoluteStartValue already?
+}
+
+func (i *floatBucketIterator) At() Bucket[float64] {
+	// Need to use i.targetSchema rather than i.baseBucketIterator.schema.
+	return i.baseBucketIterator.at(i.targetSchema)
+}
+
+func (i *floatBucketIterator) Next() bool {
+	if i.spansIdx >= len(i.spans) {
+		return false
+	}
+
+	if i.schema == i.targetSchema {
+		// Fast path for the common case.
+		span := i.spans[i.spansIdx]
+		if i.bucketsIdx == 0 {
+			// Seed currIdx for the first bucket.
+			i.currIdx = span.Offset
+		} else {
+			i.currIdx++
+		}
+
+		for i.idxInSpan >= span.Length {
+			// We have exhausted the current span and have to find a new
+			// one. We even handle pathologic spans of length 0 here.
+			i.idxInSpan = 0
+			i.spansIdx++
+			if i.spansIdx >= len(i.spans) {
+				return false
+			}
+			span = i.spans[i.spansIdx]
+			i.currIdx += span.Offset
+		}
+
+		i.currCount = i.buckets[i.bucketsIdx]
+		i.idxInSpan++
+		i.bucketsIdx++
+	} else {
+		// Copy all of these into local variables so that we can forward to the
+		// next bucket and then roll back if needed.
+		origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan
+		span := i.spans[spansIdx]
+		firstPass := true
+		i.currCount = 0
+
+	mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema.
+		for {
+			if i.bucketsIdx == 0 {
+				// Seed origIdx for the first bucket.
+				origIdx = span.Offset
+			} else {
+				origIdx++
+			}
+			for idxInSpan >= span.Length {
+				// We have exhausted the current span and have to find a new
+				// one. We even handle pathologic spans of length 0 here.
+				idxInSpan = 0
+				spansIdx++
+				if spansIdx >= len(i.spans) {
+					if firstPass {
+						return false
+					}
+					break mergeLoop
+				}
+				span = i.spans[spansIdx]
+				origIdx += span.Offset
+			}
+			currIdx := targetIdx(origIdx, i.schema, i.targetSchema)
+			switch {
+			case firstPass:
+				i.currIdx = currIdx
+				firstPass = false
+			case currIdx != i.currIdx:
+				// Reached next bucket in targetSchema.
+				// Do not actually forward to the next bucket, but break out.
+				break mergeLoop
+			}
+			i.currCount += i.buckets[i.bucketsIdx]
+			idxInSpan++
+			i.bucketsIdx++
+			i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan
+			if i.schema == i.targetSchema {
+				// Don't need to test the next bucket for mergeability
+				// if we have no schema change anyway.
+				break mergeLoop
+			}
+		}
+	}
+
+	// Skip buckets before absoluteStartValue for exponential schemas.
+	// TODO(beorn7): Maybe do something more efficient than this recursive call.
+	if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
+		return i.Next()
+	}
+	i.boundReachedStartValue = true
+	return true
+}
+
+type reverseFloatBucketIterator struct {
+	baseBucketIterator[float64, float64]
+	idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection.
+}
+
+func (i *reverseFloatBucketIterator) Next() bool {
+	i.currIdx--
+	if i.bucketsIdx < 0 {
+		return false
+	}
+
+	for i.idxInSpan < 0 {
+		// We have exhausted the current span and have to find a new
+		// one. We'll even handle pathologic spans of length 0.
+		i.spansIdx--
+		i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1
+		i.currIdx -= i.spans[i.spansIdx+1].Offset
+	}
+
+	i.currCount = i.buckets[i.bucketsIdx]
+	i.bucketsIdx--
+	i.idxInSpan--
+	return true
+}
+
+type allFloatBucketIterator struct {
+	h         *FloatHistogram
+	leftIter  reverseFloatBucketIterator
+	rightIter floatBucketIterator
+	// -1 means we are iterating negative buckets.
+	// 0 means it is time for the zero bucket.
+	// 1 means we are iterating positive buckets.
+	// Anything else means iteration is over.
+	state      int8
+	currBucket Bucket[float64]
+}
+
+func (i *allFloatBucketIterator) Next() bool {
+	switch i.state {
+	case -1:
+		if i.leftIter.Next() {
+			i.currBucket = i.leftIter.At()
+			switch {
+			case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold:
+				i.currBucket.Upper = -i.h.ZeroThreshold
+			case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold:
+				i.currBucket.Lower = i.h.ZeroThreshold
+			}
+			return true
+		}
+		i.state = 0
+		return i.Next()
+	case 0:
+		i.state = 1
+		if i.h.ZeroCount > 0 {
+			i.currBucket = i.h.ZeroBucket()
+			return true
+		}
+		return i.Next()
+	case 1:
+		if i.rightIter.Next() {
+			i.currBucket = i.rightIter.At()
+			switch {
+			case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold:
+				i.currBucket.Lower = i.h.ZeroThreshold
+			case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold:
+				i.currBucket.Upper = -i.h.ZeroThreshold
+			}
+			return true
+		}
+		i.state = 42
+		return false
+	}
+
+	return false
+}
+
+func (i *allFloatBucketIterator) At() Bucket[float64] {
+	return i.currBucket
+}
+
+// targetIdx returns the bucket index in the target schema for the given bucket
+// index idx in the original schema.
+func targetIdx(idx, originSchema, targetSchema int32) int32 {
+	return ((idx - 1) >> (originSchema - targetSchema)) + 1
+}
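+
+// Worked example: merging from schema 2 down to schema 0 shifts by 2 bits, so
+// every 2^2 = 4 adjacent origin buckets collapse into one target bucket:
+//
+//	targetIdx(1, 2, 0) // ((1-1)>>2)+1 == 1, origin buckets 1..4 -> target 1
+//	targetIdx(4, 2, 0) // ((4-1)>>2)+1 == 1
+//	targetIdx(5, 2, 0) // ((5-1)>>2)+1 == 2, origin buckets 5..8 -> target 2
+//	targetIdx(0, 2, 0) // ((0-1)>>2)+1 == 0, the arithmetic shift handles idx <= 0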
+
+// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA,
+// creating missing buckets in spansA/bucketsA as needed.
+// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA,
+// although spansA/bucketsA might get modified by this function).
+// All buckets must use the same provided schema.
+// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored.
+// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added.
+func addBuckets(
+	schema int32, threshold float64, negative bool,
+	spansA []Span, bucketsA []float64,
+	spansB []Span, bucketsB []float64,
+) ([]Span, []float64) {
+	var (
+		iSpan              = -1
+		iBucket            = -1
+		iInSpan            int32
+		indexA             int32
+		indexB             int32
+		bIdxB              int
+		bucketB            float64
+		deltaIndex         int32
+		lowerThanThreshold = true
+	)
+
+	for _, spanB := range spansB {
+		indexB += spanB.Offset
+		for j := 0; j < int(spanB.Length); j++ {
+			if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
+				goto nextLoop
+			}
+			lowerThanThreshold = false
+
+			bucketB = bucketsB[bIdxB]
+			if negative {
+				bucketB *= -1
+			}
+
+			if iSpan == -1 {
+				if len(spansA) == 0 || spansA[0].Offset > indexB {
+					// Add bucket before all others.
+					bucketsA = append(bucketsA, 0)
+					copy(bucketsA[1:], bucketsA)
+					bucketsA[0] = bucketB
+					if len(spansA) > 0 && spansA[0].Offset == indexB+1 {
+						spansA[0].Length++
+						spansA[0].Offset--
+						goto nextLoop
+					}
+					spansA = append(spansA, Span{})
+					copy(spansA[1:], spansA)
+					spansA[0] = Span{Offset: indexB, Length: 1}
+					if len(spansA) > 1 {
+						// Convert the absolute offset in the formerly
+						// first span to a relative offset.
+						spansA[1].Offset -= indexB + 1
+					}
+					goto nextLoop
+				} else if spansA[0].Offset == indexB {
+					// Just add to first bucket.
+					bucketsA[0] += bucketB
+					goto nextLoop
+				}
+				iSpan, iBucket, iInSpan = 0, 0, 0
+				indexA = spansA[0].Offset
+			}
+			deltaIndex = indexB - indexA
+			for {
+				remainingInSpan := int32(spansA[iSpan].Length) - iInSpan
+				if deltaIndex < remainingInSpan {
+					// Bucket is in current span.
+					iBucket += int(deltaIndex)
+					iInSpan += deltaIndex
+					bucketsA[iBucket] += bucketB
+					break
+				}
+				deltaIndex -= remainingInSpan
+				iBucket += int(remainingInSpan)
+				iSpan++
+				if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset {
+					// Bucket is in gap behind previous span (or there are no further spans).
+					bucketsA = append(bucketsA, 0)
+					copy(bucketsA[iBucket+1:], bucketsA[iBucket:])
+					bucketsA[iBucket] = bucketB
+					switch {
+					case deltaIndex == 0:
+						// Directly after previous span, extend previous span.
+						if iSpan < len(spansA) {
+							spansA[iSpan].Offset--
+						}
+						iSpan--
+						iInSpan = int32(spansA[iSpan].Length)
+						spansA[iSpan].Length++
+						goto nextLoop
+					case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1:
+						// Directly before next span, extend next span.
+						iInSpan = 0
+						spansA[iSpan].Offset--
+						spansA[iSpan].Length++
+						goto nextLoop
+					default:
+						// No next span, or next span is not directly adjacent to new bucket.
+						// Add new span.
+						iInSpan = 0
+						if iSpan < len(spansA) {
+							spansA[iSpan].Offset -= deltaIndex + 1
+						}
+						spansA = append(spansA, Span{})
+						copy(spansA[iSpan+1:], spansA[iSpan:])
+						spansA[iSpan] = Span{Length: 1, Offset: deltaIndex}
+						goto nextLoop
+					}
+				} else {
+					// Try start of next span.
+					deltaIndex -= spansA[iSpan].Offset
+					iInSpan = 0
+				}
+			}
+
+		nextLoop:
+			indexA = indexB
+			indexB++
+			bIdxB++
+		}
+	}
+
+	return spansA, bucketsA
+}
+
+// FloatBucketsMatch returns true if both slices hold the same bucket counts,
+// compared bit by bit (so NaN values with identical bit patterns are
+// considered equal, while +0 and -0 are not).
+func FloatBucketsMatch(b1, b2 []float64) bool {
+	if len(b1) != len(b2) {
+		return false
+	}
+	for i, b := range b1 {
+		if math.Float64bits(b) != math.Float64bits(b2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// ReduceResolution reduces the float histogram's spans and buckets to the target schema.
+// The target schema must be smaller than the current float histogram's schema.
+// This will panic if the histogram has custom buckets or if the target schema is
+// a custom buckets schema.
+func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
+	if h.UsesCustomBuckets() {
+		panic("cannot reduce resolution when there are custom buckets")
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
+	if targetSchema >= h.Schema {
+		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
+	}
+
+	h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, true)
+	h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, true)
+
+	h.Schema = targetSchema
+	return h
+}
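+
+// A short usage sketch (assuming h currently has an exponential schema greater
+// than ExponentialSchemaMin):
+//
+//	h = h.ReduceResolution(h.Schema - 1) // merges adjacent bucket pairs, mutating h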
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go
new file mode 100644
index 0000000000000000000000000000000000000000..a36b58d0696c068069544e518e53315e35a8784e
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go
@@ -0,0 +1,786 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strings"
+)
+
+const (
+	ExponentialSchemaMax int32 = 8
+	ExponentialSchemaMin int32 = -4
+	CustomBucketsSchema  int32 = -53
+)
+
+var (
+	ErrHistogramCountNotBigEnough     = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
+	ErrHistogramCountMismatch         = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
+	ErrHistogramNegativeBucketCount   = errors.New("histogram has a bucket whose observation count is negative")
+	ErrHistogramSpanNegativeOffset    = errors.New("histogram has a span whose offset is negative")
+	ErrHistogramSpansBucketsMismatch  = errors.New("histogram spans specify different number of buckets than provided")
+	ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
+	ErrHistogramCustomBucketsInvalid  = errors.New("histogram custom bounds must be in strictly increasing order")
+	ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
+	ErrHistogramsIncompatibleSchema   = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
+	ErrHistogramsIncompatibleBounds   = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
+)
+
+func IsCustomBucketsSchema(s int32) bool {
+	return s == CustomBucketsSchema
+}
+
+func IsExponentialSchema(s int32) bool {
+	return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
+}
+
+// BucketCount is a type constraint for the count in a bucket, which can be
+// float64 (for type FloatHistogram) or uint64 (for type Histogram).
+type BucketCount interface {
+	float64 | uint64
+}
+
+// InternalBucketCount is used internally by Histogram and FloatHistogram. The
+// difference from the BucketCount above is that Histogram internally uses deltas
+// between buckets rather than absolute counts (while FloatHistogram uses
+// absolute counts directly). Go type parameters don't allow type
+// specialization. Therefore, where special treatment of deltas between buckets
+// vs. absolute counts is important, this information has to be provided as a
+// separate boolean parameter "deltaBuckets".
+type InternalBucketCount interface {
+	float64 | int64
+}
+
+// Bucket represents a bucket with lower and upper limit and the absolute count
+// of samples in the bucket. It also specifies if each limit is inclusive or
+// not. (Mathematically, inclusive limits create a closed interval, and
+// non-inclusive limits an open interval.)
+//
+// To represent cumulative buckets, Lower is set to -Inf, and the Count is then
+// cumulative (including the counts of all buckets for smaller values).
+type Bucket[BC BucketCount] struct {
+	Lower, Upper                   float64
+	LowerInclusive, UpperInclusive bool
+	Count                          BC
+
+	// Index within schema. It allows easy comparison of buckets that share
+	// the same schema and sign (positive or negative). Irrelevant for the
+	// zero bucket.
+	Index int32
+}
+
+// strippedBucket is Bucket without bound values (which are expensive to calculate
+// and not used in certain use cases).
+type strippedBucket[BC BucketCount] struct {
+	count BC
+	index int32
+}
+
+// String returns a string representation of a Bucket, using the usual
+// mathematical notation of '['/']' for inclusive bounds and '('/')' for
+// non-inclusive bounds.
+func (b Bucket[BC]) String() string {
+	var sb strings.Builder
+	if b.LowerInclusive {
+		sb.WriteRune('[')
+	} else {
+		sb.WriteRune('(')
+	}
+	fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+	if b.UpperInclusive {
+		sb.WriteRune(']')
+	} else {
+		sb.WriteRune(')')
+	}
+	fmt.Fprintf(&sb, ":%v", b.Count)
+	return sb.String()
+}
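+
+// For example (hypothetical values):
+//
+//	b := Bucket[uint64]{Lower: 1, Upper: 2, UpperInclusive: true, Count: 3}
+//	fmt.Println(b) // prints "(1,2]:3"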
+
+// BucketIterator iterates over the buckets of a Histogram, returning decoded
+// buckets.
+type BucketIterator[BC BucketCount] interface {
+	// Next advances the iterator by one.
+	Next() bool
+	// At returns the current bucket.
+	At() Bucket[BC]
+}
+
+// baseBucketIterator provides a struct that is shared by most BucketIterator
+// implementations, together with an implementation of the At method. This
+// iterator can be embedded in full implementations of BucketIterator to save on
+// code replication.
+type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
+	schema  int32
+	spans   []Span
+	buckets []IBC
+
+	positive bool // Whether this is for positive buckets.
+
+	spansIdx   int    // Current span within spans slice.
+	idxInSpan  uint32 // Index in the current span. 0 <= idxInSpan < span.Length.
+	bucketsIdx int    // Current bucket within buckets slice.
+
+	currCount IBC   // Count in the current bucket.
+	currIdx   int32 // The actual bucket index.
+
+	customValues []float64 // Bounds (usually upper) for histograms with custom buckets.
+}
+
+func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] {
+	return b.at(b.schema)
+}
+
+// at is an internal version of the exported At to enable using a different schema.
+func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
+	bucket := Bucket[BC]{
+		Count: BC(b.currCount),
+		Index: b.currIdx,
+	}
+	if b.positive {
+		bucket.Upper = getBound(b.currIdx, schema, b.customValues)
+		bucket.Lower = getBound(b.currIdx-1, schema, b.customValues)
+	} else {
+		bucket.Lower = -getBound(b.currIdx, schema, b.customValues)
+		bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues)
+	}
+	if IsCustomBucketsSchema(schema) {
+		bucket.LowerInclusive = b.currIdx == 0
+		bucket.UpperInclusive = true
+	} else {
+		bucket.LowerInclusive = bucket.Lower < 0
+		bucket.UpperInclusive = bucket.Upper > 0
+	}
+	return bucket
+}
+
+// strippedAt returns the current bucket as a strippedBucket (which lacks
+// bucket bounds but is cheaper to compute).
+func (b *baseBucketIterator[BC, IBC]) strippedAt() strippedBucket[BC] {
+	return strippedBucket[BC]{
+		count: BC(b.currCount),
+		index: b.currIdx,
+	}
+}
+
+// compactBuckets is a generic function used by both Histogram.Compact and
+// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are
+// deltas. Set it to false if the buckets contain absolute counts.
+func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
+	// Fast path: If there are no empty buckets AND no offset in any span is
+	// <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return
+	// immediately. We check that first because it's cheap and presumably
+	// common.
+	nothingToDo := true
+	var currentBucketAbsolute IBC
+	for _, bucket := range buckets {
+		if deltaBuckets {
+			currentBucketAbsolute += bucket
+		} else {
+			currentBucketAbsolute = bucket
+		}
+		if currentBucketAbsolute == 0 {
+			nothingToDo = false
+			break
+		}
+	}
+	if nothingToDo {
+		for _, span := range spans {
+			if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 {
+				nothingToDo = false
+				break
+			}
+		}
+		if nothingToDo {
+			return buckets, spans
+		}
+	}
+
+	var iBucket, iSpan int
+	var posInSpan uint32
+	currentBucketAbsolute = 0
+
+	// emptyBucketsHere returns how many consecutive empty buckets follow at
+	// the current position, capped at the end of the current span.
+	emptyBucketsHere := func() int {
+		i := 0
+		abs := currentBucketAbsolute
+		for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 {
+			i++
+			if i+iBucket >= len(buckets) {
+				break
+			}
+			abs = buckets[i+iBucket]
+		}
+		return i
+	}
+
+	// Merge spans with zero-offset to avoid special cases later.
+	if len(spans) > 1 {
+		for i, span := range spans[1:] {
+			if span.Offset == 0 {
+				spans[iSpan].Length += span.Length
+				continue
+			}
+			iSpan++
+			if i+1 != iSpan {
+				spans[iSpan] = span
+			}
+		}
+		spans = spans[:iSpan+1]
+		iSpan = 0
+	}
+
+	// Merge spans with zero-length to avoid special cases later.
+	for i, span := range spans {
+		if span.Length == 0 {
+			if i+1 < len(spans) {
+				spans[i+1].Offset += span.Offset
+			}
+			continue
+		}
+		if i != iSpan {
+			spans[iSpan] = span
+		}
+		iSpan++
+	}
+	spans = spans[:iSpan]
+	iSpan = 0
+
+	// Cut out empty buckets from start and end of spans, no matter
+	// what. Also cut out empty buckets from the middle of a span but only
+	// if there are more than maxEmptyBuckets consecutive empty buckets.
+	for iBucket < len(buckets) {
+		if deltaBuckets {
+			currentBucketAbsolute += buckets[iBucket]
+		} else {
+			currentBucketAbsolute = buckets[iBucket]
+		}
+		if nEmpty := emptyBucketsHere(); nEmpty > 0 {
+			if posInSpan > 0 &&
+				nEmpty < int(spans[iSpan].Length-posInSpan) &&
+				nEmpty <= maxEmptyBuckets {
+				// The empty buckets are in the middle of a
+				// span, and there are few enough to not bother.
+				// Just fast-forward.
+				iBucket += nEmpty
+				if deltaBuckets {
+					currentBucketAbsolute = 0
+				}
+				posInSpan += uint32(nEmpty)
+				continue
+			}
+			// In all other cases, we cut out the empty buckets.
+			if deltaBuckets && iBucket+nEmpty < len(buckets) {
+				currentBucketAbsolute = -buckets[iBucket]
+				buckets[iBucket+nEmpty] += buckets[iBucket]
+			}
+			buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...)
+			if posInSpan == 0 {
+				// Start of span.
+				if nEmpty == int(spans[iSpan].Length) {
+					// The whole span is empty.
+					offset := spans[iSpan].Offset
+					spans = append(spans[:iSpan], spans[iSpan+1:]...)
+					if len(spans) > iSpan {
+						spans[iSpan].Offset += offset + int32(nEmpty)
+					}
+					continue
+				}
+				spans[iSpan].Length -= uint32(nEmpty)
+				spans[iSpan].Offset += int32(nEmpty)
+				continue
+			}
+			// It's in the middle or in the end of the span.
+			// Split the current span.
+			newSpan := Span{
+				Offset: int32(nEmpty),
+				Length: spans[iSpan].Length - posInSpan - uint32(nEmpty),
+			}
+			spans[iSpan].Length = posInSpan
+			// In any case, we now advance to the next span.
+			iSpan++
+			posInSpan = 0
+			if newSpan.Length == 0 {
+				// The span is empty, so we were already at the end of a span.
+				// We don't have to insert the new span, just adjust the next
+				// span's offset, if there is one.
+				if iSpan < len(spans) {
+					spans[iSpan].Offset += int32(nEmpty)
+				}
+				continue
+			}
+			// Insert the new span.
+			spans = append(spans, Span{})
+			if iSpan+1 < len(spans) {
+				copy(spans[iSpan+1:], spans[iSpan:])
+			}
+			spans[iSpan] = newSpan
+			continue
+		}
+		iBucket++
+		posInSpan++
+		if posInSpan >= spans[iSpan].Length {
+			posInSpan = 0
+			iSpan++
+		}
+	}
+	if maxEmptyBuckets == 0 || len(buckets) == 0 {
+		return buckets, spans
+	}
+
+	// Finally, check if any offsets between spans are small enough to merge
+	// the spans.
+	iBucket = int(spans[0].Length)
+	if deltaBuckets {
+		currentBucketAbsolute = 0
+		for _, bucket := range buckets[:iBucket] {
+			currentBucketAbsolute += bucket
+		}
+	}
+	iSpan = 1
+	for iSpan < len(spans) {
+		if int(spans[iSpan].Offset) > maxEmptyBuckets {
+			l := int(spans[iSpan].Length)
+			if deltaBuckets {
+				for _, bucket := range buckets[iBucket : iBucket+l] {
+					currentBucketAbsolute += bucket
+				}
+			}
+			iBucket += l
+			iSpan++
+			continue
+		}
+		// Merge span with previous one and insert empty buckets.
+		offset := int(spans[iSpan].Offset)
+		spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length
+		spans = append(spans[:iSpan], spans[iSpan+1:]...)
+		newBuckets := make([]IBC, len(buckets)+offset)
+		copy(newBuckets, buckets[:iBucket])
+		copy(newBuckets[iBucket+offset:], buckets[iBucket:])
+		if deltaBuckets {
+			newBuckets[iBucket] = -currentBucketAbsolute
+			newBuckets[iBucket+offset] += currentBucketAbsolute
+		}
+		iBucket += offset
+		buckets = newBuckets
+		currentBucketAbsolute = buckets[iBucket]
+		// Note that with many merges, it would be more efficient to
+		// first record all the chunks of empty buckets to insert and
+		// then do it in one go through all the buckets.
+	}
+
+	return buckets, spans
+}
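+
+// Worked example (absolute counts, so deltaBuckets=false): cutting two empty
+// buckets out of the middle of a span splits that span in two:
+//
+//	buckets, spans := compactBuckets(
+//		[]float64{1, 0, 0, 2}, []Span{{Offset: 0, Length: 4}}, 0, false,
+//	)
+//	// buckets: []float64{1, 2}
+//	// spans:   []Span{{Offset: 0, Length: 1}, {Offset: 2, Length: 1}}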
+
+func checkHistogramSpans(spans []Span, numBuckets int) error {
+	var spanBuckets int
+	for n, span := range spans {
+		if n > 0 && span.Offset < 0 {
+			return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
+		}
+		spanBuckets += int(span.Length)
+	}
+	if spanBuckets != numBuckets {
+		return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
+	}
+	return nil
+}
+
+func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error {
+	if len(buckets) == 0 {
+		return nil
+	}
+
+	var last IBC
+	for i := 0; i < len(buckets); i++ {
+		var c IBC
+		if deltas {
+			c = last + buckets[i]
+		} else {
+			c = buckets[i]
+		}
+		if c < 0 {
+			return fmt.Errorf("bucket number %d has observation count of %v: %w", i+1, c, ErrHistogramNegativeBucketCount)
+		}
+		last = c
+		*count += BC(c)
+	}
+
+	return nil
+}
+
+func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
+	prev := math.Inf(-1)
+	for _, curr := range bounds {
+		if curr <= prev {
+			return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
+		}
+		prev = curr
+	}
+	if prev == math.Inf(1) {
+		return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite)
+	}
+
+	var spanBuckets int
+	var totalSpanLength int
+	for n, span := range spans {
+		if span.Offset < 0 {
+			return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
+		}
+		spanBuckets += int(span.Length)
+		totalSpanLength += int(span.Length) + int(span.Offset)
+	}
+	if spanBuckets != numBuckets {
+		return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
+	}
+	if (len(bounds) + 1) < totalSpanLength {
+		return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch)
+	}
+
+	return nil
+}
+
+func getBound(idx, schema int32, customValues []float64) float64 {
+	if IsCustomBucketsSchema(schema) {
+		length := int32(len(customValues))
+		switch {
+		case idx > length || idx < -1:
+			panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length))
+		case idx == length:
+			return math.Inf(1)
+		case idx == -1:
+			return math.Inf(-1)
+		default:
+			return customValues[idx]
+		}
+	}
+	return getBoundExponential(idx, schema)
+}
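+
+// For custom buckets (hypothetical values): with customValues = []float64{0.5, 1},
+// getBound returns -Inf for idx -1, 0.5 for idx 0, 1 for idx 1, and +Inf for
+// idx 2 (the implicit overflow bucket).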
+
+func getBoundExponential(idx, schema int32) float64 {
+	// Here a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with an idx
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its idx
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := int(idx) << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := idx & ((1 << schema) - 1)
+	frac := exponentialBounds[schema][fracIdx]
+	exp := (int(idx) >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
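+
+// A few values of the formula at work (schema 0 uses powers of two, schema 1
+// splits each power of two into two buckets):
+//
+//	getBoundExponential(0, 0)  // 1
+//	getBoundExponential(1, 0)  // 2
+//	getBoundExponential(-1, 0) // 0.5
+//	getBoundExponential(1, 1)  // 1.4142... (√2)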
+
+// exponentialBounds is a precalculated table of bucket bounds in the interval
+// [0.5,1) in schema 0 to 8.
+var exponentialBounds = [][]float64{
+	// Schema "0":
+	{0.5},
+	// Schema 1:
+	{0.5, 0.7071067811865475},
+	// Schema 2:
+	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+	// Schema 3:
+	{
+		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+	},
+	// Schema 4:
+	{
+		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+	},
+	// Schema 5:
+	{
+		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+	},
+	// Schema 6:
+	{
+		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+	},
+	// Schema 7:
+	{
+		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+	},
+	// Schema 8:
+	{
+		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+	},
+}
+
+// reduceResolution reduces the input spans and buckets from the origin schema to the target schema.
+// The target schema must be smaller than the original schema.
+// Set deltaBuckets to true if the provided buckets are
+// deltas. Set it to false if the buckets contain absolute counts.
+// Set inplace to true to reuse input slices and avoid allocations (otherwise
+// new slices will be allocated for result).
+func reduceResolution[IBC InternalBucketCount](
+	originSpans []Span,
+	originBuckets []IBC,
+	originSchema,
+	targetSchema int32,
+	deltaBuckets bool,
+	inplace bool,
+) ([]Span, []IBC) {
+	var (
+		targetSpans           []Span // The spans in the target schema.
+		targetBuckets         []IBC  // The bucket counts in the target schema.
+		bucketIdx             int32  // The index of bucket in the origin schema.
+		bucketCountIdx        int    // The position of a bucket in origin bucket count slice `originBuckets`.
+		targetBucketIdx       int32  // The index of bucket in the target schema.
+		lastBucketCount       IBC    // The last visited bucket's count in the origin schema.
+		lastTargetBucketIdx   int32  // The index of the last added target bucket.
+		lastTargetBucketCount IBC
+	)
+
+	if inplace {
+		// Slice reuse is safe because when reducing the resolution,
+		// target slices don't grow faster than origin slices are being read.
+		targetSpans = originSpans[:0]
+		targetBuckets = originBuckets[:0]
+	}
+
+	for _, span := range originSpans {
+		// Determine the index of the first bucket in this span.
+		bucketIdx += span.Offset
+		for j := 0; j < int(span.Length); j++ {
+			// Determine the index of the bucket in the target schema from the index in the original schema.
+			targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema)
+
+			switch {
+			case len(targetSpans) == 0:
+				// This is the first span in the targetSpans.
+				span := Span{
+					Offset: targetBucketIdx,
+					Length: 1,
+				}
+				targetSpans = append(targetSpans, span)
+				targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx])
+				lastTargetBucketIdx = targetBucketIdx
+				lastBucketCount = originBuckets[bucketCountIdx]
+				lastTargetBucketCount = originBuckets[bucketCountIdx]
+
+			case lastTargetBucketIdx == targetBucketIdx:
+				// The current bucket has to be merged into the same target bucket as the previous bucket.
+				if deltaBuckets {
+					lastBucketCount += originBuckets[bucketCountIdx]
+					targetBuckets[len(targetBuckets)-1] += lastBucketCount
+					lastTargetBucketCount += lastBucketCount
+				} else {
+					targetBuckets[len(targetBuckets)-1] += originBuckets[bucketCountIdx]
+				}
+
+			case (lastTargetBucketIdx + 1) == targetBucketIdx:
+				// The current bucket has to go into a new target bucket,
+				// and that bucket is next to the previous target bucket,
+				// so we add it to the current target span.
+				targetSpans[len(targetSpans)-1].Length++
+				lastTargetBucketIdx++
+				if deltaBuckets {
+					lastBucketCount += originBuckets[bucketCountIdx]
+					targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount)
+					lastTargetBucketCount = lastBucketCount
+				} else {
+					targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx])
+				}
+
+			case (lastTargetBucketIdx + 1) < targetBucketIdx:
+				// The current bucket has to go into a new target bucket,
+				// and that bucket is separated by a gap from the previous target bucket,
+				// so we need to add a new target span.
+				span := Span{
+					Offset: targetBucketIdx - lastTargetBucketIdx - 1,
+					Length: 1,
+				}
+				targetSpans = append(targetSpans, span)
+				lastTargetBucketIdx = targetBucketIdx
+				if deltaBuckets {
+					lastBucketCount += originBuckets[bucketCountIdx]
+					targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount)
+					lastTargetBucketCount = lastBucketCount
+				} else {
+					targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx])
+				}
+			}
+
+			bucketIdx++
+			bucketCountIdx++
+		}
+	}
+
+	return targetSpans, targetBuckets
+}
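+
+// Worked example (absolute counts): reducing schema 1 to schema 0 maps origin
+// indices 0,1,2,3 to target indices 0,1,1,2, so the middle two buckets merge:
+//
+//	spans, buckets := reduceResolution(
+//		[]Span{{Offset: 0, Length: 4}}, []float64{1, 2, 3, 4}, 1, 0, false, false,
+//	)
+//	// spans:   []Span{{Offset: 0, Length: 3}}
+//	// buckets: []float64{1, 5, 4}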
+
+func clearIfNotNil[T any](items []T) []T {
+	if items == nil {
+		return nil
+	}
+	return items[:0]
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go
new file mode 100644
index 0000000000000000000000000000000000000000..778aefe282898d150eb54331f324f79aeeb51ec2
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go
@@ -0,0 +1,633 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"slices"
+	"strings"
+)
+
+// CounterResetHint contains the known information about a counter reset,
+// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply.
+type CounterResetHint byte
+
+const (
+	UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not.
+	CounterReset                                // CounterReset means there was definitely a counter reset starting from this histogram.
+	NotCounterReset                             // NotCounterReset means there was definitely no counter reset with this histogram.
+	GaugeType                                   // GaugeType means this is a gauge histogram, where counter resets do not happen.
+)
+
+// Histogram encodes a sparse, high-resolution histogram. See the design
+// document for full details:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit#
+//
+// The most tricky bit is how bucket indices represent real bucket boundaries.
+// An example for schema 0 (by which each bucket is twice as wide as the
+// previous bucket):
+//
+//	Bucket boundaries →              [-2,-1)  [-1,-0.5) [-0.5,-0.25) ... [-0.001,0.001] ... (0.25,0.5] (0.5,1]  (1,2] ....
+//	                                    ↑        ↑           ↑                  ↑                ↑         ↑      ↑
+//	Zero bucket (width e.g. 0.001) →    |        |           |                  ZB               |         |      |
+//	Positive bucket indices →           |        |           |                          ...     -1         0      1    2    3
+//	Negative bucket indices →  3   2    1        0          -1       ...
+//
+// Which bucket indices are actually used is determined by the spans.
+type Histogram struct {
+	// Counter reset information.
+	CounterResetHint CounterResetHint
+	// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
+	// each case, and then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times
+	// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
+	// the CustomValues field.
+	Schema int32
+	// Width of the zero bucket.
+	ZeroThreshold float64
+	// Observations falling into the zero bucket.
+	ZeroCount uint64
+	// Total number of observations.
+	Count uint64
+	// Sum of observations. This is also used as the stale marker.
+	Sum float64
+	// Spans for positive and negative buckets (see Span below).
+	PositiveSpans, NegativeSpans []Span
+	// Observation counts in buckets. The first element is an absolute
+	// count. All following ones are deltas relative to the previous
+	// element.
+	PositiveBuckets, NegativeBuckets []int64
+	// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
+	// This slice is interned, to be treated as immutable and copied by reference.
+	// These numbers should be strictly increasing. This field is only used when the
+	// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
+	// and NegativeBuckets fields are not used in that case.
+	CustomValues []float64
+}
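+
+// To illustrate the delta encoding (hypothetical values): PositiveBuckets of
+// []int64{2, -1, 1} describes absolute per-bucket counts of 2, 1, and 2, since
+// every element after the first is a delta relative to its predecessor:
+//
+//	deltas := []int64{2, -1, 1}
+//	abs := make([]int64, len(deltas))
+//	var cur int64
+//	for i, d := range deltas {
+//		cur += d
+//		abs[i] = cur
+//	}
+//	// abs: [2, 1, 2]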
+
+// A Span defines a continuous sequence of buckets.
+type Span struct {
+	// Gap to previous span (always positive), or starting index for the 1st
+	// span (which can be negative).
+	Offset int32
+	// Length of the span.
+	Length uint32
+}
+
+// UsesCustomBuckets returns whether this histogram uses custom buckets
+// (schema -53) rather than an exponential bucketing schema.
+func (h *Histogram) UsesCustomBuckets() bool {
+	return IsCustomBucketsSchema(h.Schema)
+}
+
+// Copy returns a deep copy of the Histogram.
+func (h *Histogram) Copy() *Histogram {
+	c := Histogram{
+		CounterResetHint: h.CounterResetHint,
+		Schema:           h.Schema,
+		Count:            h.Count,
+		Sum:              h.Sum,
+	}
+
+	if h.UsesCustomBuckets() {
+		if len(h.CustomValues) != 0 {
+			c.CustomValues = make([]float64, len(h.CustomValues))
+			copy(c.CustomValues, h.CustomValues)
+		}
+	} else {
+		c.ZeroThreshold = h.ZeroThreshold
+		c.ZeroCount = h.ZeroCount
+
+		if len(h.NegativeSpans) != 0 {
+			c.NegativeSpans = make([]Span, len(h.NegativeSpans))
+			copy(c.NegativeSpans, h.NegativeSpans)
+		}
+		if len(h.NegativeBuckets) != 0 {
+			c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
+			copy(c.NegativeBuckets, h.NegativeBuckets)
+		}
+	}
+
+	if len(h.PositiveSpans) != 0 {
+		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
+		copy(c.PositiveSpans, h.PositiveSpans)
+	}
+	if len(h.PositiveBuckets) != 0 {
+		c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
+		copy(c.PositiveBuckets, h.PositiveBuckets)
+	}
+
+	return &c
+}
+
+// CopyTo makes a deep copy into the given Histogram object.
+// The destination object has to be a non-nil pointer.
+func (h *Histogram) CopyTo(to *Histogram) {
+	to.CounterResetHint = h.CounterResetHint
+	to.Schema = h.Schema
+	to.Count = h.Count
+	to.Sum = h.Sum
+
+	if h.UsesCustomBuckets() {
+		to.ZeroThreshold = 0
+		to.ZeroCount = 0
+
+		to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
+		to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
+
+		to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
+		copy(to.CustomValues, h.CustomValues)
+	} else {
+		to.ZeroThreshold = h.ZeroThreshold
+		to.ZeroCount = h.ZeroCount
+
+		to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
+		copy(to.NegativeSpans, h.NegativeSpans)
+
+		to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
+		copy(to.NegativeBuckets, h.NegativeBuckets)
+
+		to.CustomValues = clearIfNotNil(to.CustomValues)
+	}
+
+	to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
+	copy(to.PositiveSpans, h.PositiveSpans)
+
+	to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
+	copy(to.PositiveBuckets, h.PositiveBuckets)
+}
+
+// String returns a string representation of the Histogram.
+func (h *Histogram) String() string {
+	var sb strings.Builder
+	fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum)
+
+	var nBuckets []Bucket[uint64]
+	for it := h.NegativeBucketIterator(); it.Next(); {
+		bucket := it.At()
+		if bucket.Count != 0 {
+			nBuckets = append(nBuckets, it.At())
+		}
+	}
+	for i := len(nBuckets) - 1; i >= 0; i-- {
+		fmt.Fprintf(&sb, ", %s", nBuckets[i].String())
+	}
+
+	if h.ZeroCount != 0 {
+		fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String())
+	}
+
+	for it := h.PositiveBucketIterator(); it.Next(); {
+		bucket := it.At()
+		if bucket.Count != 0 {
+			fmt.Fprintf(&sb, ", %s", bucket.String())
+		}
+	}
+
+	sb.WriteRune('}')
+	return sb.String()
+}
+
+// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
+func (h *Histogram) ZeroBucket() Bucket[uint64] {
+	if h.UsesCustomBuckets() {
+		panic("histograms with custom buckets have no zero bucket")
+	}
+	return Bucket[uint64]{
+		Lower:          -h.ZeroThreshold,
+		Upper:          h.ZeroThreshold,
+		LowerInclusive: true,
+		UpperInclusive: true,
+		Count:          h.ZeroCount,
+	}
+}
+
+// PositiveBucketIterator returns a BucketIterator to iterate over all positive
+// buckets in ascending order (starting next to the zero bucket and going up).
+func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
+	it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
+	return &it
+}
+
+// NegativeBucketIterator returns a BucketIterator to iterate over all negative
+// buckets in descending order (starting next to the zero bucket and going down).
+func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
+	it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
+	return &it
+}
+
+// CumulativeBucketIterator returns a BucketIterator to iterate over a
+// cumulative view of the buckets. This method currently only supports
+// Histograms without negative buckets and panics if the Histogram has negative
+// buckets. It is currently only used for testing.
+func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
+	if len(h.NegativeBuckets) > 0 {
+		panic("CumulativeBucketIterator called on Histogram with negative buckets")
+	}
+	return &cumulativeBucketIterator{h: h, posSpansIdx: -1}
+}
+
+// Equals returns true if the given histogram matches exactly.
+// Exact match is when there are no new buckets (even empty) and no missing buckets,
+// and all the bucket values match. The two span slices may differ in interspersed
+// zero-length spans, but they must represent the same bucket layout to match.
+// Sum is compared based on its bit pattern because this method
+// is about data equality rather than mathematical equality.
+// We ignore fields that are not used based on the exponential / custom buckets schema,
+// but check fields where differences may cause unintended behaviour even if they are not
+// supposed to be used according to the schema.
+func (h *Histogram) Equals(h2 *Histogram) bool {
+	if h2 == nil {
+		return false
+	}
+
+	if h.Schema != h2.Schema || h.Count != h2.Count ||
+		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
+		return false
+	}
+
+	if h.UsesCustomBuckets() {
+		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
+			return false
+		}
+	}
+
+	if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
+		return false
+	}
+
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
+
+	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
+		return false
+	}
+	if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
+		return false
+	}
+
+	return true
+}
+
+// spansMatch returns true if both span slices represent the same bucket layout
+// after combining zero length spans with the next non-zero length span.
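+// For example, [{Offset: 0, Length: 2}] matches
+// [{Offset: -1, Length: 0}, {Offset: 1, Length: 2}], because the zero-length
+// span's offset folds into the following span, yielding the same layout.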
+func spansMatch(s1, s2 []Span) bool {
+	if len(s1) == 0 && len(s2) == 0 {
+		return true
+	}
+
+	s1idx, s2idx := 0, 0
+	for {
+		if s1idx >= len(s1) {
+			return allEmptySpans(s2[s2idx:])
+		}
+		if s2idx >= len(s2) {
+			return allEmptySpans(s1[s1idx:])
+		}
+
+		currS1, currS2 := s1[s1idx], s2[s2idx]
+		s1idx++
+		s2idx++
+		if currS1.Length == 0 {
+			// This span is zero length, so we add consecutive such spans
+			// until we find a non-zero span.
+			for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ {
+				currS1.Offset += s1[s1idx].Offset
+			}
+			if s1idx < len(s1) {
+				currS1.Offset += s1[s1idx].Offset
+				currS1.Length = s1[s1idx].Length
+				s1idx++
+			}
+		}
+		if currS2.Length == 0 {
+			// This span is zero length, so we add consecutive such spans
+			// until we find a non-zero span.
+			for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ {
+				currS2.Offset += s2[s2idx].Offset
+			}
+			if s2idx < len(s2) {
+				currS2.Offset += s2[s2idx].Offset
+				currS2.Length = s2[s2idx].Length
+				s2idx++
+			}
+		}
+
+		if currS1.Length == 0 && currS2.Length == 0 {
+			// The last spans of both sets are zero length. Previous spans match.
+			return true
+		}
+
+		if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length {
+			return false
+		}
+	}
+}
+
+func allEmptySpans(s []Span) bool {
+	for _, ss := range s {
+		if ss.Length > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Compact works like FloatHistogram.Compact. See there for detailed
+// explanations.
+func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
+	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
+		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
+	)
+	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
+		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
+	)
+	return h
+}
+
+// ToFloat returns a FloatHistogram representation of the Histogram. It is a deep
+// copy (e.g. spans are not shared). The function accepts a FloatHistogram as an
+// argument whose memory will be reused and overwritten if provided. If this
+// argument is nil, a new FloatHistogram will be allocated.
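+// Note that the integer representation stores bucket counts as deltas while the
+// float representation stores absolute counts, so e.g. PositiveBuckets [2, 1, -1]
+// become [2, 3, 2] in the resulting FloatHistogram.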
+func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
+	if fh == nil {
+		fh = &FloatHistogram{}
+	}
+	fh.CounterResetHint = h.CounterResetHint
+	fh.Schema = h.Schema
+	fh.Count = float64(h.Count)
+	fh.Sum = h.Sum
+
+	if h.UsesCustomBuckets() {
+		fh.ZeroThreshold = 0
+		fh.ZeroCount = 0
+		fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans)
+		fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets)
+
+		fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues))
+		copy(fh.CustomValues, h.CustomValues)
+	} else {
+		fh.ZeroThreshold = h.ZeroThreshold
+		fh.ZeroCount = float64(h.ZeroCount)
+
+		fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
+		copy(fh.NegativeSpans, h.NegativeSpans)
+
+		fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
+		var currentNegative float64
+		for i, b := range h.NegativeBuckets {
+			currentNegative += float64(b)
+			fh.NegativeBuckets[i] = currentNegative
+		}
+		fh.CustomValues = clearIfNotNil(fh.CustomValues)
+	}
+
+	fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
+	copy(fh.PositiveSpans, h.PositiveSpans)
+
+	fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
+	var currentPositive float64
+	for i, b := range h.PositiveBuckets {
+		currentPositive += float64(b)
+		fh.PositiveBuckets[i] = currentPositive
+	}
+
+	return fh
+}
+
+func resize[T any](items []T, n int) []T {
+	if cap(items) < n {
+		return make([]T, n)
+	}
+	return items[:n]
+}
+
+// Validate validates consistency between span and bucket slices. Also, buckets are checked
+// for negative counts. We check to make sure there are no unexpected fields or field values
+// based on the exponential / custom buckets schema.
+// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a
+// strict h.Count = nCount + pCount + h.ZeroCount check is performed.
+// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount),
+// because NaN observations do not increment the values of buckets (but they do increment
+// the total h.Count).
+func (h *Histogram) Validate() error {
+	var nCount, pCount uint64
+	if h.UsesCustomBuckets() {
+		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("custom buckets: %w", err)
+		}
+		if h.ZeroCount != 0 {
+			return errors.New("custom buckets: must have zero count of 0")
+		}
+		if h.ZeroThreshold != 0 {
+			return errors.New("custom buckets: must have zero threshold of 0")
+		}
+		if len(h.NegativeSpans) > 0 {
+			return errors.New("custom buckets: must not have negative spans")
+		}
+		if len(h.NegativeBuckets) > 0 {
+			return errors.New("custom buckets: must not have negative buckets")
+		}
+	} else {
+		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("positive side: %w", err)
+		}
+		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
+		if err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		if h.CustomValues != nil {
+			return errors.New("histogram with exponential schema must not have custom bounds")
+		}
+	}
+	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
+	if err != nil {
+		return fmt.Errorf("positive side: %w", err)
+	}
+
+	sumOfBuckets := nCount + pCount + h.ZeroCount
+	if math.IsNaN(h.Sum) {
+		if sumOfBuckets > h.Count {
+			return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountNotBigEnough)
+		}
+	} else {
+		if sumOfBuckets != h.Count {
+			return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountMismatch)
+		}
+	}
+
+	return nil
+}
+
+type regularBucketIterator struct {
+	baseBucketIterator[uint64, int64]
+}
+
+func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator {
+	i := baseBucketIterator[uint64, int64]{
+		schema:       schema,
+		spans:        spans,
+		buckets:      buckets,
+		positive:     positive,
+		customValues: customValues,
+	}
+	return regularBucketIterator{i}
+}
+
+func (r *regularBucketIterator) Next() bool {
+	if r.spansIdx >= len(r.spans) {
+		return false
+	}
+	span := r.spans[r.spansIdx]
+	// Seed currIdx for the first bucket.
+	if r.bucketsIdx == 0 {
+		r.currIdx = span.Offset
+	} else {
+		r.currIdx++
+	}
+	for r.idxInSpan >= span.Length {
+		// We have exhausted the current span and have to find a new
+		// one. We'll even handle pathological spans of length 0.
+		r.idxInSpan = 0
+		r.spansIdx++
+		if r.spansIdx >= len(r.spans) {
+			return false
+		}
+		span = r.spans[r.spansIdx]
+		r.currIdx += span.Offset
+	}
+
+	r.currCount += r.buckets[r.bucketsIdx]
+	r.idxInSpan++
+	r.bucketsIdx++
+	return true
+}
+
+type cumulativeBucketIterator struct {
+	h *Histogram
+
+	posSpansIdx   int    // Index in h.PositiveSpans we are in. -1 means the zero bucket.
+	posBucketsIdx int    // Index in h.PositiveBuckets.
+	idxInSpan     uint32 // Index in the current span. 0 <= idxInSpan < span.Length.
+
+	initialized         bool
+	currIdx             int32   // The actual bucket index after decoding from spans.
+	currUpper           float64 // The upper boundary of the current bucket.
+	currCount           int64   // Current non-cumulative count for the current bucket. Does not apply for empty bucket.
+	currCumulativeCount uint64  // Current "cumulative" count for the current bucket.
+
+	// Between two spans there could be some empty buckets which
+	// still need to be counted for cumulative buckets.
+	// When we hit the end of a span, we use this to iterate
+	// through the empty buckets.
+	emptyBucketCount int32
+}
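+
+// For example, with a zero count of 0, spans [{Offset: 0, Length: 2}, {Offset: 2,
+// Length: 1}] and absolute bucket counts 1, 2 and 3, the iterator yields the
+// cumulative counts 1, 3, 3, 3, 6: the two empty buckets between the spans repeat
+// the running total 3, each with its own upper boundary.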
+
+func (c *cumulativeBucketIterator) Next() bool {
+	if c.posSpansIdx == -1 {
+		// Zero bucket.
+		c.posSpansIdx++
+		if c.h.ZeroCount == 0 {
+			return c.Next()
+		}
+
+		c.currUpper = c.h.ZeroThreshold
+		c.currCount = int64(c.h.ZeroCount)
+		c.currCumulativeCount = uint64(c.currCount)
+		return true
+	}
+
+	if c.posSpansIdx >= len(c.h.PositiveSpans) {
+		return false
+	}
+
+	if c.emptyBucketCount > 0 {
+		// We are traversing through empty buckets at the moment.
+		c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+		c.currIdx++
+		c.emptyBucketCount--
+		return true
+	}
+
+	span := c.h.PositiveSpans[c.posSpansIdx]
+	if c.posSpansIdx == 0 && !c.initialized {
+		// Initializing.
+		c.currIdx = span.Offset
+		// The first bucket holds an absolute count, not a delta relative to the zero bucket.
+		c.currCount = 0
+		c.initialized = true
+	}
+
+	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
+	c.currCumulativeCount += uint64(c.currCount)
+	c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+
+	c.posBucketsIdx++
+	c.idxInSpan++
+	c.currIdx++
+	if c.idxInSpan >= span.Length {
+		// Move to the next span. This one is done.
+		c.posSpansIdx++
+		c.idxInSpan = 0
+		if c.posSpansIdx < len(c.h.PositiveSpans) {
+			c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset
+		}
+	}
+
+	return true
+}
+
+func (c *cumulativeBucketIterator) At() Bucket[uint64] {
+	return Bucket[uint64]{
+		Upper:          c.currUpper,
+		Lower:          math.Inf(-1),
+		UpperInclusive: true,
+		LowerInclusive: true,
+		Count:          c.currCumulativeCount,
+		Index:          c.currIdx - 1,
+	}
+}
+
+// ReduceResolution reduces the histogram's spans and buckets to the target schema.
+// The target schema must be smaller than the current histogram's schema.
+// This will panic if the histogram has custom buckets or if the target schema is
+// a custom buckets schema.
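+// For example, reducing from schema 1 to schema 0 merges each pair of adjacent
+// buckets, so the schema-1 buckets (1,√2] and (√2,2] combine into the single
+// schema-0 bucket (1,2].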
+func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
+	if h.UsesCustomBuckets() {
+		panic("cannot reduce resolution when there are custom buckets")
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
+	if targetSchema >= h.Schema {
+		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
+	}
+
+	h.PositiveSpans, h.PositiveBuckets = reduceResolution(
+		h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, true,
+	)
+	h.NegativeSpans, h.NegativeBuckets = reduceResolution(
+		h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, true,
+	)
+	h.Schema = targetSchema
+	return h
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6b33863bd4c95f34f1bd1f0f8df1c94e914dbe2
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+// GenerateBigTestHistograms generates numHistograms histograms with numBuckets buckets each.
+func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
+	numSpans := numBuckets / 10
+	bucketsPerSide := numBuckets / 2
+	spanLength := uint32(bucketsPerSide / numSpans)
+	// With all bucket deltas set to 1, each side holds counts 1, 2, ..., bucketsPerSide,
+	// so both sides together observe bucketsPerSide * (bucketsPerSide + 1).
+	observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide))
+
+	var histograms []*Histogram
+	for i := 0; i < numHistograms; i++ {
+		h := &Histogram{
+			Count:           uint64(i) + observationCount,
+			ZeroCount:       uint64(i),
+			ZeroThreshold:   1e-128,
+			Sum:             18.4 * float64(i+1),
+			Schema:          2,
+			NegativeSpans:   make([]Span, numSpans),
+			PositiveSpans:   make([]Span, numSpans),
+			NegativeBuckets: make([]int64, bucketsPerSide),
+			PositiveBuckets: make([]int64, bucketsPerSide),
+		}
+
+		for j := 0; j < numSpans; j++ {
+			s := Span{Offset: 1, Length: spanLength}
+			h.NegativeSpans[j] = s
+			h.PositiveSpans[j] = s
+		}
+
+		for j := 0; j < bucketsPerSide; j++ {
+			h.NegativeBuckets[j] = 1
+			h.PositiveBuckets[j] = 1
+		}
+
+		histograms = append(histograms, h)
+	}
+	return histograms
+}
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go
similarity index 51%
rename from vendor/github.com/prometheus/prometheus/pkg/labels/labels.go
rename to vendor/github.com/prometheus/prometheus/model/labels/labels.go
index 5c11cc2eeef019d029a92c2a15f74146394a7491..0747ab90d92fc42c6ee8d6878bb4ee45f20753c9 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go
@@ -11,34 +11,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !stringlabels && !dedupelabels
+
 package labels
 
 import (
 	"bytes"
-	"encoding/json"
-	"sort"
-	"strconv"
+	"slices"
+	"strings"
+	"unsafe"
 
 	"github.com/cespare/xxhash/v2"
 )
 
-// Well-known label names used by Prometheus components.
-const (
-	MetricName   = "__name__"
-	AlertName    = "alertname"
-	BucketLabel  = "le"
-	InstanceName = "instance"
-
-	labelSep = '\xfe'
-)
-
-var seps = []byte{'\xff'}
-
-// Label is a key/value pair of strings.
-type Label struct {
-	Name, Value string
-}
-
 // Labels is a sorted set of labels. Order has to be guaranteed upon
 // instantiation.
 type Labels []Label
@@ -47,23 +32,6 @@ func (ls Labels) Len() int           { return len(ls) }
 func (ls Labels) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
 func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
 
-func (ls Labels) String() string {
-	var b bytes.Buffer
-
-	b.WriteByte('{')
-	for i, l := range ls {
-		if i > 0 {
-			b.WriteByte(',')
-			b.WriteByte(' ')
-		}
-		b.WriteString(l.Name)
-		b.WriteByte('=')
-		b.WriteString(strconv.Quote(l.Value))
-	}
-	b.WriteByte('}')
-	return b.String()
-}
-
 // Bytes returns ls as a byte slice.
 // It uses an invalid byte as a separator and so should not be used for printing.
 func (ls Labels) Bytes(buf []byte) []byte {
@@ -71,55 +39,21 @@ func (ls Labels) Bytes(buf []byte) []byte {
 	b.WriteByte(labelSep)
 	for i, l := range ls {
 		if i > 0 {
-			b.WriteByte(seps[0])
+			b.WriteByte(sep)
 		}
 		b.WriteString(l.Name)
-		b.WriteByte(seps[0])
+		b.WriteByte(sep)
 		b.WriteString(l.Value)
 	}
 	return b.Bytes()
 }
 
-// MarshalJSON implements json.Marshaler.
-func (ls Labels) MarshalJSON() ([]byte, error) {
-	return json.Marshal(ls.Map())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (ls *Labels) UnmarshalJSON(b []byte) error {
-	var m map[string]string
-
-	if err := json.Unmarshal(b, &m); err != nil {
-		return err
-	}
-
-	*ls = FromMap(m)
-	return nil
-}
-
-// MarshalYAML implements yaml.Marshaler.
-func (ls Labels) MarshalYAML() (interface{}, error) {
-	return ls.Map(), nil
-}
-
-// UnmarshalYAML implements yaml.Unmarshaler.
-func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var m map[string]string
-
-	if err := unmarshal(&m); err != nil {
-		return err
-	}
-
-	*ls = FromMap(m)
-	return nil
-}
-
 // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
 // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
 func (ls Labels) MatchLabels(on bool, names ...string) Labels {
 	matchedLabels := Labels{}
 
-	nameSet := map[string]struct{}{}
+	nameSet := make(map[string]struct{}, len(names))
 	for _, n := range names {
 		nameSet[n] = struct{}{}
 	}
@@ -134,6 +68,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels {
 }
 
 // Hash returns a hash value for the label set.
+// Note: the result is not guaranteed to be consistent across different runs of Prometheus.
 func (ls Labels) Hash() uint64 {
 	// Use xxhash.Sum64(b) for fast path as it's faster.
 	b := make([]byte, 0, 1024)
@@ -152,9 +87,9 @@ func (ls Labels) Hash() uint64 {
 		}
 
 		b = append(b, v.Name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, v.Value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	return xxhash.Sum64(b)
 }
@@ -165,15 +100,16 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
 	b = b[:0]
 	i, j := 0, 0
 	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
 			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
 			i++
-		} else {
+		default:
 			b = append(b, ls[i].Name...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 			b = append(b, ls[i].Value...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 			i++
 			j++
 		}
@@ -195,49 +131,60 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
 			continue
 		}
 		b = append(b, ls[i].Name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, ls[i].Value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	return xxhash.Sum64(b), b
 }
 
-// WithLabels returns a new labels.Labels from ls that only contains labels matching names.
+// BytesWithLabels works like Bytes(), but includes only the labels matching names.
 // 'names' have to be sorted in ascending order.
-func (ls Labels) WithLabels(names ...string) Labels {
-	ret := make([]Label, 0, len(ls))
-
+func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	b.WriteByte(labelSep)
 	i, j := 0, 0
 	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
 			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
 			i++
-		} else {
-			ret = append(ret, ls[i])
+		default:
+			if b.Len() > 1 {
+				b.WriteByte(sep)
+			}
+			b.WriteString(ls[i].Name)
+			b.WriteByte(sep)
+			b.WriteString(ls[i].Value)
 			i++
 			j++
 		}
 	}
-	return ret
+	return b.Bytes()
 }
 
-// WithoutLabels returns a new labels.Labels from ls that contains labels not matching names.
+// BytesWithoutLabels works like Bytes(), but excludes the labels matching names.
 // 'names' have to be sorted in ascending order.
-func (ls Labels) WithoutLabels(names ...string) Labels {
-	ret := make([]Label, 0, len(ls))
-
+func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	b.WriteByte(labelSep)
 	j := 0
 	for i := range ls {
 		for j < len(names) && names[j] < ls[i].Name {
 			j++
 		}
-		if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
+		if j < len(names) && ls[i].Name == names[j] {
 			continue
 		}
-		ret = append(ret, ls[i])
+		if b.Len() > 1 {
+			b.WriteByte(sep)
+		}
+		b.WriteString(ls[i].Name)
+		b.WriteByte(sep)
+		b.WriteString(ls[i].Value)
 	}
-	return ret
+	return b.Bytes()
 }
 
 // Copy returns a copy of the labels.
@@ -307,54 +254,39 @@ func Equal(ls, o Labels) bool {
 		return false
 	}
 	for i, l := range ls {
-		if l.Name != o[i].Name || l.Value != o[i].Value {
+		if l != o[i] {
 			return false
 		}
 	}
 	return true
 }
 
-// Map returns a string map of the labels.
-func (ls Labels) Map() map[string]string {
-	m := make(map[string]string, len(ls))
-	for _, l := range ls {
-		m[l.Name] = l.Value
-	}
-	return m
+// EmptyLabels returns an empty Labels value, for convenience.
+func EmptyLabels() Labels {
+	return Labels{}
 }
 
 // New returns a sorted Labels from the given labels.
 // The caller has to guarantee that all label names are unique.
 func New(ls ...Label) Labels {
 	set := make(Labels, 0, len(ls))
-	for _, l := range ls {
-		set = append(set, l)
-	}
-	sort.Sort(set)
+	set = append(set, ls...)
+	slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
 
 	return set
 }
 
-// FromMap returns new sorted Labels from the given map.
-func FromMap(m map[string]string) Labels {
-	l := make([]Label, 0, len(m))
-	for k, v := range m {
-		l = append(l, Label{Name: k, Value: v})
-	}
-	return New(l...)
-}
-
 // FromStrings creates new labels from pairs of strings.
 func FromStrings(ss ...string) Labels {
 	if len(ss)%2 != 0 {
 		panic("invalid number of strings")
 	}
-	var res Labels
+	res := make(Labels, 0, len(ss)/2)
 	for i := 0; i < len(ss); i += 2 {
 		res = append(res, Label{Name: ss[i], Value: ss[i+1]})
 	}
 
-	sort.Sort(res)
+	slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
 	return res
 }
 
@@ -384,91 +316,181 @@ func Compare(a, b Labels) int {
 	return len(a) - len(b)
 }
 
-// Builder allows modifying Labels.
-type Builder struct {
-	base Labels
-	del  []string
-	add  []Label
+// CopyFrom copies labels from b on top of whatever was in ls previously,
+// reusing memory or expanding if needed.
+func (ls *Labels) CopyFrom(b Labels) {
+	(*ls) = append((*ls)[:0], b...)
+}
+
+// IsEmpty returns true if ls represents an empty set of labels.
+func (ls Labels) IsEmpty() bool {
+	return len(ls) == 0
 }
 
-// NewBuilder returns a new LabelsBuilder.
-func NewBuilder(base Labels) *Builder {
-	b := &Builder{
-		del: make([]string, 0, 5),
-		add: make([]Label, 0, 5),
+// Range calls f on each label.
+func (ls Labels) Range(f func(l Label)) {
+	for _, l := range ls {
+		f(l)
 	}
-	b.Reset(base)
-	return b
 }
 
-// Reset clears all current state for the builder.
-func (b *Builder) Reset(base Labels) {
-	b.base = base
-	b.del = b.del[:0]
-	b.add = b.add[:0]
-	for _, l := range b.base {
-		if l.Value == "" {
-			b.del = append(b.del, l.Name)
+// Validate calls f on each label. If f returns a non-nil error, then it returns that error, cancelling the iteration.
+func (ls Labels) Validate(f func(l Label) error) error {
+	for _, l := range ls {
+		if err := f(l); err != nil {
+			return err
 		}
 	}
+	return nil
 }
 
-// Del deletes the label of the given name.
-func (b *Builder) Del(ns ...string) *Builder {
-	for _, n := range ns {
-		for i, a := range b.add {
-			if a.Name == n {
-				b.add = append(b.add[:i], b.add[i+1:]...)
+// DropMetricName returns Labels with "__name__" removed.
+func (ls Labels) DropMetricName() Labels {
+	for i, l := range ls {
+		if l.Name == MetricName {
+			if i == 0 { // Make common case fast with no allocations.
+				return ls[1:]
 			}
+			// Avoid modifying the original Labels: [:i:i] leaves the left slice with
+			// no spare capacity, so append must allocate a new slice for the result.
+			return append(ls[:i:i], ls[i+1:]...)
 		}
-		b.del = append(b.del, n)
 	}
-	return b
+	return ls
 }
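+
+// For instance (names illustrative), for ls = FromStrings("A", "x", "__name__", "up"),
+// "__name__" sits at index 1, so the append above operates on ls[:1:1]; with no
+// spare capacity it must allocate, leaving the original ls intact.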
 
-// Set the name/value pair as a label.
-func (b *Builder) Set(n, v string) *Builder {
-	if v == "" {
-		// Empty labels are the same as missing labels.
-		return b.Del(n)
+// InternStrings calls intern on every string value inside ls, replacing them with what it returns.
+func (ls *Labels) InternStrings(intern func(string) string) {
+	for i, l := range *ls {
+		(*ls)[i].Name = intern(l.Name)
+		(*ls)[i].Value = intern(l.Value)
 	}
-	for i, a := range b.add {
-		if a.Name == n {
-			b.add[i].Value = v
-			return b
-		}
+}
+
+// ReleaseStrings calls release on every string value inside ls.
+func (ls Labels) ReleaseStrings(release func(string)) {
+	for _, l := range ls {
+		release(l.Name)
+		release(l.Value)
 	}
-	b.add = append(b.add, Label{Name: n, Value: v})
+}
+
+// Builder allows modifying Labels.
+type Builder struct {
+	base Labels
+	del  []string
+	add  []Label
+}
 
-	return b
+// Reset clears all current state for the builder.
+func (b *Builder) Reset(base Labels) {
+	b.base = base
+	b.del = b.del[:0]
+	b.add = b.add[:0]
+	b.base.Range(func(l Label) {
+		if l.Value == "" {
+			b.del = append(b.del, l.Name)
+		}
+	})
 }
 
-// Labels returns the labels from the builder. If no modifications
-// were made, the original labels are returned.
+// Labels returns the labels from the builder.
+// If no modifications were made, the original labels are returned.
 func (b *Builder) Labels() Labels {
 	if len(b.del) == 0 && len(b.add) == 0 {
 		return b.base
 	}
 
-	// In the general case, labels are removed, modified or moved
-	// rather than added.
-	res := make(Labels, 0, len(b.base))
-Outer:
+	expectedSize := len(b.base) + len(b.add) - len(b.del)
+	if expectedSize < 1 {
+		expectedSize = 1
+	}
+	res := make(Labels, 0, expectedSize)
 	for _, l := range b.base {
-		for _, n := range b.del {
-			if l.Name == n {
-				continue Outer
-			}
-		}
-		for _, la := range b.add {
-			if l.Name == la.Name {
-				continue Outer
-			}
+		if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) {
+			continue
 		}
 		res = append(res, l)
 	}
-	res = append(res, b.add...)
-	sort.Sort(res)
-
+	if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it.
+		res = append(res, b.add...)
+		slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+	}
 	return res
 }
+
+// ScratchBuilder allows efficient construction of a Labels from scratch.
+type ScratchBuilder struct {
+	add Labels
+}
+
+// SymbolTable is a no-op, present only for API parity with dedupelabels.
+type SymbolTable struct{}
+
+func NewSymbolTable() *SymbolTable { return nil }
+
+func (t *SymbolTable) Len() int { return 0 }
+
+// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
+func NewScratchBuilder(n int) ScratchBuilder {
+	return ScratchBuilder{add: make([]Label, 0, n)}
+}
+
+// NewBuilderWithSymbolTable creates a Builder, for API parity with dedupelabels.
+func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder {
+	return NewBuilder(EmptyLabels())
+}
+
+// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for API parity with dedupelabels.
+func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder {
+	return NewScratchBuilder(n)
+}
+
+func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) {
+	// no-op
+}
+
+func (b *ScratchBuilder) Reset() {
+	b.add = b.add[:0]
+}
+
+// Add a name/value pair.
+// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+func (b *ScratchBuilder) Add(name, value string) {
+	b.add = append(b.add, Label{Name: name, Value: value})
+}
+
+// UnsafeAddBytes adds a name/value pair, using []byte instead of string.
+// The '-tags stringlabels' version of this function is unsafe, hence the name.
+// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
+func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
+	b.add = append(b.add, Label{Name: string(name), Value: string(value)})
+}
+
+// Sort the labels added so far by name.
+func (b *ScratchBuilder) Sort() {
+	slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+}
+
+// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(ls Labels) {
+	b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
+}
+
+// Labels returns the name/value pairs added so far as a Labels object.
+// Note: if you want them sorted, call Sort() first.
+func (b *ScratchBuilder) Labels() Labels {
+	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
+	return append([]Label{}, b.add...)
+}
+
+// Overwrite writes the newly-built Labels into ls.
+// Callers must ensure that there are no other references to ls, or any strings fetched from it.
+func (b *ScratchBuilder) Overwrite(ls *Labels) {
+	*ls = append((*ls)[:0], b.add...)
+}
+
+// SizeOfLabels returns the approximate space required for n copies of a label.
+func SizeOfLabels(name, value string, n uint64) uint64 {
+	return (uint64(len(name)) + uint64(unsafe.Sizeof(name)) + uint64(len(value)) + uint64(unsafe.Sizeof(value))) * n
+}
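+
+// For example, on a 64-bit platform a string header is 16 bytes, so
+// SizeOfLabels("job", "api", 10) reports (3+16+3+16)*10 = 380 bytes.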
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go
new file mode 100644
index 0000000000000000000000000000000000000000..a232eeea5d3d7e815fe0fadcca8fb3214c1736f4
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go
@@ -0,0 +1,238 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"bytes"
+	"encoding/json"
+	"slices"
+	"strconv"
+	"unsafe"
+
+	"github.com/prometheus/common/model"
+)
+
+const (
+	MetricName   = "__name__"
+	AlertName    = "alertname"
+	BucketLabel  = "le"
+	InstanceName = "instance"
+
+	labelSep = '\xfe' // Used at beginning of `Bytes` return.
+	sep      = '\xff' // Used between labels in `Bytes` and `Hash`.
+)
+
+var seps = []byte{sep} // Used with Hash, which has no WriteByte method.
+
+// Label is a key/value pair of strings.
+type Label struct {
+	Name, Value string
+}
+
+func (ls Labels) String() string {
+	var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+	b := bytes.NewBuffer(bytea[:0])
+
+	b.WriteByte('{')
+	i := 0
+	ls.Range(func(l Label) {
+		if i > 0 {
+			b.WriteByte(',')
+			b.WriteByte(' ')
+		}
+		if !model.LabelName(l.Name).IsValidLegacy() {
+			b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Name))
+		} else {
+			b.WriteString(l.Name)
+		}
+		b.WriteByte('=')
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value))
+		i++
+	})
+	b.WriteByte('}')
+	return b.String()
+}
+
+// MarshalJSON implements json.Marshaler.
+func (ls Labels) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ls.Map())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (ls *Labels) UnmarshalJSON(b []byte) error {
+	var m map[string]string
+
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+
+	*ls = FromMap(m)
+	return nil
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (ls Labels) MarshalYAML() (interface{}, error) {
+	return ls.Map(), nil
+}
+
+// UnmarshalYAML implements yaml.Unmarshaler.
+func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var m map[string]string
+
+	if err := unmarshal(&m); err != nil {
+		return err
+	}
+
+	*ls = FromMap(m)
+	return nil
+}
+
+// IsValid checks if the metric name or label names are valid.
+func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool {
+	err := ls.Validate(func(l Label) error {
+		if l.Name == model.MetricNameLabel {
+			// If the default validation scheme has been overridden with legacy mode,
+			// we need to call the special legacy validation checker.
+			if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) {
+				return strconv.ErrSyntax
+			}
+			if !model.IsValidMetricName(model.LabelValue(l.Value)) {
+				return strconv.ErrSyntax
+			}
+		}
+		if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation {
+			if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() {
+				return strconv.ErrSyntax
+			}
+		} else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
+			return strconv.ErrSyntax
+		}
+		return nil
+	})
+	return err == nil
+}
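+
+// For example, a metric name such as "my.metric" is accepted under UTF-8
+// validation but rejected under legacy validation, which only permits names
+// matching [a-zA-Z_:][a-zA-Z0-9_:]*.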
+
+// Map returns a string map of the labels.
+func (ls Labels) Map() map[string]string {
+	m := make(map[string]string)
+	ls.Range(func(l Label) {
+		m[l.Name] = l.Value
+	})
+	return m
+}
+
+// FromMap returns new sorted Labels from the given map.
+func FromMap(m map[string]string) Labels {
+	l := make([]Label, 0, len(m))
+	for k, v := range m {
+		l = append(l, Label{Name: k, Value: v})
+	}
+	return New(l...)
+}
+
+// NewBuilder returns a new LabelsBuilder.
+func NewBuilder(base Labels) *Builder {
+	b := &Builder{
+		del: make([]string, 0, 5),
+		add: make([]Label, 0, 5),
+	}
+	b.Reset(base)
+	return b
+}
+
+// Del deletes the label of the given name.
+func (b *Builder) Del(ns ...string) *Builder {
+	for _, n := range ns {
+		for i, a := range b.add {
+			if a.Name == n {
+				b.add = append(b.add[:i], b.add[i+1:]...)
+			}
+		}
+		b.del = append(b.del, n)
+	}
+	return b
+}
+
+// Keep removes all labels from the base except those with the given names.
+func (b *Builder) Keep(ns ...string) *Builder {
+	b.base.Range(func(l Label) {
+		for _, n := range ns {
+			if l.Name == n {
+				return
+			}
+		}
+		b.del = append(b.del, l.Name)
+	})
+	return b
+}
+
+// Set the name/value pair as a label. A value of "" means delete that label.
+func (b *Builder) Set(n, v string) *Builder {
+	if v == "" {
+		// Empty labels are the same as missing labels.
+		return b.Del(n)
+	}
+	for i, a := range b.add {
+		if a.Name == n {
+			b.add[i].Value = v
+			return b
+		}
+	}
+	b.add = append(b.add, Label{Name: n, Value: v})
+
+	return b
+}
+
+func (b *Builder) Get(n string) string {
+	// Del() removes entries from .add but Set() does not remove from .del, so check .add first.
+	for _, a := range b.add {
+		if a.Name == n {
+			return a.Value
+		}
+	}
+	if slices.Contains(b.del, n) {
+		return ""
+	}
+	return b.base.Get(n)
+}
+
+// Range calls f on each label in the Builder.
+func (b *Builder) Range(f func(l Label)) {
+	// Stack-based arrays to avoid heap allocation in most cases.
+	var addStack [128]Label
+	var delStack [128]string
+	// Take a copy of add and del, so they are unaffected by calls to Set() or Del().
+	origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...)
+	b.base.Range(func(l Label) {
+		if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) {
+			f(l)
+		}
+	})
+	for _, a := range origAdd {
+		f(a)
+	}
+}
+
+func contains(s []Label, n string) bool {
+	for _, a := range s {
+		if a.Name == n {
+			return true
+		}
+	}
+	return false
+}
+
+func yoloString(b []byte) string {
+	return unsafe.String(unsafe.SliceData(b), len(b))
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go
new file mode 100644
index 0000000000000000000000000000000000000000..a0d83e00447a6afdbc09c4660320cdda1c5f0070
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go
@@ -0,0 +1,822 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build dedupelabels
+
+package labels
+
+import (
+	"bytes"
+	"slices"
+	"strings"
+	"sync"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+// Labels is implemented by a SymbolTable and string holding name/value
+// pairs encoded as indexes into the table in varint encoding.
+// Names are in alphabetical order.
+type Labels struct {
+	syms *nameTable
+	data string
+}
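+
+// For example (table contents illustrative), with a symbol table mapping
+// "__name__"→0, "up"→1, "job"→2 and "api"→3, the label set
+// {__name__="up", job="api"} is stored in data as the varint-encoded index
+// sequence 0 1 2 3, two bytes per index at this table size.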
+
+// SymbolTable is split into the part used by Labels and the part used by Builder. Only the latter needs the map.
+
+// This part is used by Labels. All fields are immutable after construction.
+type nameTable struct {
+	byNum       []string     // This slice header is never changed, even while we are building the symbol table.
+	symbolTable *SymbolTable // If we need to use it in a Builder.
+}
+
+// SymbolTable is used to map strings into numbers so they can be packed together.
+type SymbolTable struct {
+	mx sync.Mutex
+	*nameTable
+	nextNum int
+	byName  map[string]int
+}
+
+const defaultSymbolTableSize = 1024
+
+func NewSymbolTable() *SymbolTable {
+	t := &SymbolTable{
+		nameTable: &nameTable{byNum: make([]string, defaultSymbolTableSize)},
+		byName:    make(map[string]int, defaultSymbolTableSize),
+	}
+	t.nameTable.symbolTable = t
+	return t
+}
+
+func (t *SymbolTable) Len() int {
+	t.mx.Lock()
+	defer t.mx.Unlock()
+	return len(t.byName)
+}
+
+// ToNum maps a string to an integer, adding the string to the table if it is not already there.
+// Note: copies the string before adding, in case the caller passed part of
+// a buffer that should not be kept alive by this SymbolTable.
+func (t *SymbolTable) ToNum(name string) int {
+	t.mx.Lock()
+	defer t.mx.Unlock()
+	return t.toNumUnlocked(name)
+}
+
+func (t *SymbolTable) toNumUnlocked(name string) int {
+	if i, found := t.byName[name]; found {
+		return i
+	}
+	i := t.nextNum
+	if t.nextNum == cap(t.byNum) {
+		// Name table is full; copy to a new one. Don't touch the existing slice, as nameTable is immutable after construction.
+		newSlice := make([]string, cap(t.byNum)*2)
+		copy(newSlice, t.byNum)
+		t.nameTable = &nameTable{byNum: newSlice, symbolTable: t}
+	}
+	name = strings.Clone(name)
+	t.byNum[i] = name
+	t.byName[name] = i
+	t.nextNum++
+	return i
+}
+
+func (t *SymbolTable) checkNum(name string) (int, bool) {
+	t.mx.Lock()
+	defer t.mx.Unlock()
+	i, ok := t.byName[name]
+	return i, ok
+}
+
+// ToName maps an integer to a string.
+func (t *nameTable) ToName(num int) string {
+	return t.byNum[num]
+}
+
+// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes,
+// because we expect most Prometheus installations to have more than 127 unique strings.
+// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings.
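+// For example, decodeVarint reads the two bytes 0x2C 0x01 as 300 (0x012C),
+// while 100000 (0x186A0) needs three bytes: 0xA0 0x86 carry the low 15 bits
+// with the continuation bit set, and 0x03 supplies the remaining bits.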
+func decodeVarint(data string, index int) (int, int) {
+	b := int(data[index]) + int(data[index+1])<<8
+	index += 2
+	if b < 0x8000 {
+		return b, index
+	}
+	return decodeVarintRest(b, data, index)
+}
+
+func decodeVarintRest(b int, data string, index int) (int, int) {
+	value := int(b & 0x7FFF)
+	b = int(data[index])
+	index++
+	if b < 0x80 {
+		return value | (b << 15), index
+	}
+
+	value |= (b & 0x7f) << 15
+	b = int(data[index])
+	index++
+	return value | (b << 22), index
+}
+
+func decodeString(t *nameTable, data string, index int) (string, int) {
+	// Copy decodeVarint here, because the Go compiler says it's too big to inline.
+	num := int(data[index]) + int(data[index+1])<<8
+	index += 2
+	if num >= 0x8000 {
+		num, index = decodeVarintRest(num, data, index)
+	}
+	return t.ToName(num), index
+}
+
+// Bytes returns ls as a byte slice.
+// It uses non-printing characters and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	for i := 0; i < len(ls.data); {
+		if i > 0 {
+			b.WriteByte(sep)
+		}
+		var name, value string
+		name, i = decodeString(ls.syms, ls.data, i)
+		value, i = decodeString(ls.syms, ls.data, i)
+		b.WriteString(name)
+		b.WriteByte(sep)
+		b.WriteString(value)
+	}
+	return b.Bytes()
+}
+
+// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted.
+func (ls Labels) IsZero() bool {
+	return len(ls.data) == 0
+}
+
+// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
+// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
+// TODO: This is only used in printing an error message
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+	b := NewBuilder(ls)
+	if on {
+		b.Keep(names...)
+	} else {
+		b.Del(MetricName)
+		b.Del(names...)
+	}
+	return b.Labels()
+}
+
+// Hash returns a hash value for the label set.
+// Note: the result is not guaranteed to be consistent across different runs of Prometheus.
+func (ls Labels) Hash() uint64 {
+	// Use xxhash.Sum64(b) for fast path as it's faster.
+	b := make([]byte, 0, 1024)
+	for pos := 0; pos < len(ls.data); {
+		name, newPos := decodeString(ls.syms, ls.data, pos)
+		value, newPos := decodeString(ls.syms, ls.data, newPos)
+		if len(b)+len(name)+len(value)+2 >= cap(b) {
+			// If labels entry is 1KB+, hash the rest of them via Write().
+			h := xxhash.New()
+			_, _ = h.Write(b)
+			for pos < len(ls.data) {
+				name, pos = decodeString(ls.syms, ls.data, pos)
+				value, pos = decodeString(ls.syms, ls.data, pos)
+				_, _ = h.WriteString(name)
+				_, _ = h.Write(seps)
+				_, _ = h.WriteString(value)
+				_, _ = h.Write(seps)
+			}
+			return h.Sum64()
+		}
+
+		b = append(b, name...)
+		b = append(b, sep)
+		b = append(b, value...)
+		b = append(b, sep)
+		pos = newPos
+	}
+	return xxhash.Sum64(b)
+}
+
+// HashForLabels returns a hash value for the labels matching the provided names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
+	b = b[:0]
+	j := 0
+	for i := 0; i < len(ls.data); {
+		var name, value string
+		name, i = decodeString(ls.syms, ls.data, i)
+		value, i = decodeString(ls.syms, ls.data, i)
+		for j < len(names) && names[j] < name {
+			j++
+		}
+		if j == len(names) {
+			break
+		}
+		if name == names[j] {
+			b = append(b, name...)
+			b = append(b, sep)
+			b = append(b, value...)
+			b = append(b, sep)
+		}
+	}
+
+	return xxhash.Sum64(b), b
+}
+
+// HashWithoutLabels returns a hash value for all labels except those matching
+// the provided names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
+	b = b[:0]
+	j := 0
+	for i := 0; i < len(ls.data); {
+		var name, value string
+		name, i = decodeString(ls.syms, ls.data, i)
+		value, i = decodeString(ls.syms, ls.data, i)
+		for j < len(names) && names[j] < name {
+			j++
+		}
+		if name == MetricName || (j < len(names) && name == names[j]) {
+			continue
+		}
+		b = append(b, name...)
+		b = append(b, sep)
+		b = append(b, value...)
+		b = append(b, sep)
+	}
+	return xxhash.Sum64(b), b
+}
+
+// BytesWithLabels works like Bytes(), but includes only the labels matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	j := 0
+	for pos := 0; pos < len(ls.data); {
+		lName, newPos := decodeString(ls.syms, ls.data, pos)
+		lValue, newPos := decodeString(ls.syms, ls.data, newPos)
+		for j < len(names) && names[j] < lName {
+			j++
+		}
+		if j == len(names) {
+			break
+		}
+		if lName == names[j] {
+			if b.Len() > 1 {
+				b.WriteByte(sep)
+			}
+			b.WriteString(lName)
+			b.WriteByte(sep)
+			b.WriteString(lValue)
+		}
+		pos = newPos
+	}
+	return b.Bytes()
+}
+
+// BytesWithoutLabels works like Bytes(), but excludes the labels matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	j := 0
+	for pos := 0; pos < len(ls.data); {
+		lName, newPos := decodeString(ls.syms, ls.data, pos)
+		lValue, newPos := decodeString(ls.syms, ls.data, newPos)
+		for j < len(names) && names[j] < lName {
+			j++
+		}
+		if j == len(names) || lName != names[j] {
+			if b.Len() > 1 {
+				b.WriteByte(sep)
+			}
+			b.WriteString(lName)
+			b.WriteByte(sep)
+			b.WriteString(lValue)
+		}
+		pos = newPos
+	}
+	return b.Bytes()
+}
+
+// Copy returns a copy of the labels.
+func (ls Labels) Copy() Labels {
+	return Labels{syms: ls.syms, data: strings.Clone(ls.data)}
+}
+
+// Get returns the value for the label with the given name.
+// Returns an empty string if the label doesn't exist.
+func (ls Labels) Get(name string) string {
+	if name == "" { // Avoid crash in loop if someone asks for "".
+		return "" // Prometheus does not store blank label names.
+	}
+	for i := 0; i < len(ls.data); {
+		var lName, lValue string
+		lName, i = decodeString(ls.syms, ls.data, i)
+		if lName == name {
+			lValue, _ = decodeString(ls.syms, ls.data, i)
+			return lValue
+		} else if lName[0] > name[0] { // Stop looking if we've gone past.
+			break
+		}
+		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
+		num := int(ls.data[i]) + int(ls.data[i+1])<<8
+		i += 2
+		if num >= 0x8000 {
+			_, i = decodeVarintRest(num, ls.data, i)
+		}
+	}
+	return ""
+}
+
+// Has returns true if the label with the given name is present.
+func (ls Labels) Has(name string) bool {
+	if name == "" { // Avoid crash in loop if someone asks for "".
+		return false // Prometheus does not store blank label names.
+	}
+	for i := 0; i < len(ls.data); {
+		var lName string
+		lName, i = decodeString(ls.syms, ls.data, i)
+		if lName == name {
+			return true
+		} else if lName[0] > name[0] { // Stop looking if we've gone past.
+			break
+		}
+		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
+		num := int(ls.data[i]) + int(ls.data[i+1])<<8
+		i += 2
+		if num >= 0x8000 {
+			_, i = decodeVarintRest(num, ls.data, i)
+		}
+	}
+	return false
+}
+
+// HasDuplicateLabelNames returns whether ls has duplicate label names.
+// It assumes that the labelset is sorted.
+func (ls Labels) HasDuplicateLabelNames() (string, bool) {
+	prevNum := -1
+	for i := 0; i < len(ls.data); {
+		var lNum int
+		lNum, i = decodeVarint(ls.data, i)
+		_, i = decodeVarint(ls.data, i)
+		if lNum == prevNum {
+			return ls.syms.ToName(lNum), true
+		}
+		prevNum = lNum
+	}
+	return "", false
+}
+
+// WithoutEmpty returns the labelset without empty labels.
+// May return the same labelset.
+func (ls Labels) WithoutEmpty() Labels {
+	if ls.IsEmpty() {
+		return ls
+	}
+	// Idea: have a constant symbol for blank, then we don't have to look it up.
+	blank, ok := ls.syms.symbolTable.checkNum("")
+	if !ok { // Symbol table has no entry for blank - none of the values can be blank.
+		return ls
+	}
+	for pos := 0; pos < len(ls.data); {
+		_, newPos := decodeVarint(ls.data, pos)
+		lValue, newPos := decodeVarint(ls.data, newPos)
+		if lValue != blank {
+			pos = newPos
+			continue
+		}
+		// Do not copy the slice until it's necessary.
+		// TODO: could optimise the case where all blanks are at the end.
+		// Note: we size the new buffer on the assumption there is exactly one blank value.
+		buf := make([]byte, pos, pos+(len(ls.data)-newPos))
+		copy(buf, ls.data[:pos]) // copy the initial non-blank labels
+		pos = newPos             // move past the first blank value
+		for pos < len(ls.data) {
+			var newPos int
+			_, newPos = decodeVarint(ls.data, pos)
+			lValue, newPos = decodeVarint(ls.data, newPos)
+			if lValue != blank {
+				buf = append(buf, ls.data[pos:newPos]...)
+			}
+			pos = newPos
+		}
+		return Labels{syms: ls.syms, data: yoloString(buf)}
+	}
+	return ls
+}
+
+// Equal returns whether the two label sets are equal.
+func Equal(a, b Labels) bool {
+	if a.syms == b.syms {
+		return a.data == b.data
+	}
+
+	la, lb := len(a.data), len(b.data)
+	ia, ib := 0, 0
+	for ia < la && ib < lb {
+		var aValue, bValue string
+		aValue, ia = decodeString(a.syms, a.data, ia)
+		bValue, ib = decodeString(b.syms, b.data, ib)
+		if aValue != bValue {
+			return false
+		}
+	}
+	if ia != la || ib != lb {
+		return false
+	}
+	return true
+}
+
+// EmptyLabels returns an empty Labels value, for convenience.
+func EmptyLabels() Labels {
+	return Labels{}
+}
+
+// New returns a sorted Labels from the given labels.
+// The caller has to guarantee that all label names are unique.
+// Note this function is not efficient; should not be used in performance-critical places.
+func New(ls ...Label) Labels {
+	slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+	syms := NewSymbolTable()
+	var stackSpace [16]int
+	size, nums := mapLabelsToNumbers(syms, ls, stackSpace[:])
+	buf := make([]byte, size)
+	marshalNumbersToSizedBuffer(nums, buf)
+	return Labels{syms: syms.nameTable, data: yoloString(buf)}
+}
+
+// FromStrings creates new labels from pairs of strings.
+func FromStrings(ss ...string) Labels {
+	if len(ss)%2 != 0 {
+		panic("invalid number of strings")
+	}
+	ls := make([]Label, 0, len(ss)/2)
+	for i := 0; i < len(ss); i += 2 {
+		ls = append(ls, Label{Name: ss[i], Value: ss[i+1]})
+	}
+
+	return New(ls...)
+}
+
+// Compare compares the two label sets.
+// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
+func Compare(a, b Labels) int {
+	la, lb := len(a.data), len(b.data)
+	ia, ib := 0, 0
+	for ia < la && ib < lb {
+		var aName, bName string
+		aName, ia = decodeString(a.syms, a.data, ia)
+		bName, ib = decodeString(b.syms, b.data, ib)
+		if aName != bName {
+			if aName < bName {
+				return -1
+			}
+			return 1
+		}
+		var aValue, bValue string
+		aValue, ia = decodeString(a.syms, a.data, ia)
+		bValue, ib = decodeString(b.syms, b.data, ib)
+		if aValue != bValue {
+			if aValue < bValue {
+				return -1
+			}
+			return 1
+		}
+	}
+	// If all labels so far were in common, the set with fewer labels comes first.
+	return (la - ia) - (lb - ib)
+}
+
+// CopyFrom copies labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+func (ls *Labels) CopyFrom(b Labels) {
+	*ls = b // Straightforward memberwise copy is all we need.
+}
+
+// IsEmpty returns true if ls represents an empty set of labels.
+func (ls Labels) IsEmpty() bool {
+	return len(ls.data) == 0
+}
+
+// Len returns the number of labels; it is relatively slow.
+func (ls Labels) Len() int {
+	count := 0
+	for i := 0; i < len(ls.data); {
+		_, i = decodeVarint(ls.data, i)
+		_, i = decodeVarint(ls.data, i)
+		count++
+	}
+	return count
+}
+
+// Range calls f on each label.
+func (ls Labels) Range(f func(l Label)) {
+	for i := 0; i < len(ls.data); {
+		var lName, lValue string
+		lName, i = decodeString(ls.syms, ls.data, i)
+		lValue, i = decodeString(ls.syms, ls.data, i)
+		f(Label{Name: lName, Value: lValue})
+	}
+}
+
+// Validate calls f on each label. If f returns a non-nil error, then it returns that error, cancelling the iteration.
+func (ls Labels) Validate(f func(l Label) error) error {
+	for i := 0; i < len(ls.data); {
+		var lName, lValue string
+		lName, i = decodeString(ls.syms, ls.data, i)
+		lValue, i = decodeString(ls.syms, ls.data, i)
+		err := f(Label{Name: lName, Value: lValue})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// InternStrings calls intern on every string value inside ls, replacing them with what it returns.
+func (ls *Labels) InternStrings(intern func(string) string) {
+	// TODO: remove these calls as there is nothing to do.
+}
+
+// ReleaseStrings calls release on every string value inside ls.
+func (ls Labels) ReleaseStrings(release func(string)) {
+	// TODO: remove these calls as there is nothing to do.
+}
+
+// DropMetricName returns Labels with "__name__" removed.
+func (ls Labels) DropMetricName() Labels {
+	for i := 0; i < len(ls.data); {
+		lName, i2 := decodeString(ls.syms, ls.data, i)
+		_, i2 = decodeVarint(ls.data, i2)
+		if lName == MetricName {
+			if i == 0 { // Make common case fast with no allocations.
+				ls.data = ls.data[i2:]
+			} else {
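+				// The label is in the middle: string concatenation allocates a
+				// new backing array, leaving the original data intact.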
+				ls.data = ls.data[:i] + ls.data[i2:]
+			}
+			break
+		} else if lName[0] > MetricName[0] { // Stop looking if we've gone past.
+			break
+		}
+		i = i2
+	}
+	return ls
+}
+
+// Builder allows modifying Labels.
+type Builder struct {
+	syms *SymbolTable
+	nums []int
+	base Labels
+	del  []string
+	add  []Label
+}
+
+// NewBuilderWithSymbolTable returns a new LabelsBuilder not based on any labels, but with the SymbolTable.
+func NewBuilderWithSymbolTable(s *SymbolTable) *Builder {
+	return &Builder{
+		syms: s,
+	}
+}
+
+// Reset clears all current state for the builder.
+func (b *Builder) Reset(base Labels) {
+	if base.syms != nil { // If base has a symbol table, use that.
+		b.syms = base.syms.symbolTable
+	} else if b.syms == nil { // Or continue using previous symbol table in builder.
+		b.syms = NewSymbolTable() // Don't do this in performance-sensitive code.
+	}
+
+	b.base = base
+	b.del = b.del[:0]
+	b.add = b.add[:0]
+	base.Range(func(l Label) {
+		if l.Value == "" {
+			b.del = append(b.del, l.Name)
+		}
+	})
+}
+
+// Labels returns the labels from the builder.
+// If no modifications were made, the original labels are returned.
+func (b *Builder) Labels() Labels {
+	if len(b.del) == 0 && len(b.add) == 0 {
+		return b.base
+	}
+
+	slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+	slices.Sort(b.del)
+	a, d, newSize := 0, 0, 0
+
+	newSize, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums)
+	bufSize := len(b.base.data) + newSize
+	buf := make([]byte, 0, bufSize)
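+	// Single pass: merge the sorted base entries with the sorted add/del lists,
+	// copying surviving encoded entries straight from base where possible.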
+	for pos := 0; pos < len(b.base.data); {
+		oldPos := pos
+		var lName string
+		lName, pos = decodeString(b.base.syms, b.base.data, pos)
+		_, pos = decodeVarint(b.base.data, pos)
+		for d < len(b.del) && b.del[d] < lName {
+			d++
+		}
+		if d < len(b.del) && b.del[d] == lName {
+			continue // This label has been deleted.
+		}
+		for ; a < len(b.add) && b.add[a].Name < lName; a++ {
+			buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) // Insert label that was not in the base set.
+		}
+		if a < len(b.add) && b.add[a].Name == lName {
+			buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf)
+			a++
+			continue // This label has been replaced.
+		}
+		buf = append(buf, b.base.data[oldPos:pos]...) // If base had a symbol-table we are using it, so we don't need to look up these symbols.
+	}
+	// We have come to the end of the base set; add any remaining labels.
+	for ; a < len(b.add); a++ {
+		buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf)
+	}
+	return Labels{syms: b.syms.nameTable, data: yoloString(buf)}
+}
+
+func marshalNumbersToSizedBuffer(nums []int, data []byte) int {
+	i := len(data)
+	for index := len(nums) - 1; index >= 0; index-- {
+		i = encodeVarint(data, i, nums[index])
+	}
+	return len(data) - i
+}
+
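+// sizeVarint returns the encoded size of x. The encoding used here always puts
+// the low 8 bits of x in the first byte, so the minimum size is 2 bytes; see
+// encodeVarint and encodeVarintSlow.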
+func sizeVarint(x uint64) (n int) {
+	// Most common case first
+	if x < 1<<15 {
+		return 2
+	}
+	if x < 1<<22 {
+		return 3
+	}
+	if x >= 1<<29 {
+		panic("Number too large to represent")
+	}
+	return 4
+}
+
+func encodeVarintSlow(data []byte, offset int, v uint64) int {
+	offset -= sizeVarint(v)
+	base := offset
+	data[offset] = uint8(v)
+	v >>= 8
+	offset++
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return base
+}
+
+// Special code for the common case that a value is less than 32768
+func encodeVarint(data []byte, offset, v int) int {
+	if v < 1<<15 {
+		offset -= 2
+		data[offset] = uint8(v)
+		data[offset+1] = uint8(v >> 8)
+		return offset
+	}
+	return encodeVarintSlow(data, offset, uint64(v))
+}
+
+// Map all the strings in lbls to the symbol table; return the total size required to hold them and all the individual mappings.
+func mapLabelsToNumbers(t *SymbolTable, lbls []Label, buf []int) (totalSize int, nums []int) {
+	nums = buf[:0]
+	t.mx.Lock()
+	defer t.mx.Unlock()
+	// we just encode name/value/name/value, without any extra tags or length bytes
+	for _, m := range lbls {
+		// strings are encoded as a single varint, the index into the symbol table.
+		i := t.toNumUnlocked(m.Name)
+		nums = append(nums, i)
+		totalSize += sizeVarint(uint64(i))
+		i = t.toNumUnlocked(m.Value)
+		nums = append(nums, i)
+		totalSize += sizeVarint(uint64(i))
+	}
+	return totalSize, nums
+}
+
+func appendLabelTo(nameNum, valueNum int, buf []byte) []byte {
+	size := sizeVarint(uint64(nameNum)) + sizeVarint(uint64(valueNum))
+	sizeRequired := len(buf) + size
+	if cap(buf) >= sizeRequired {
+		buf = buf[:sizeRequired]
+	} else {
+		bufSize := cap(buf)
+		// Double size of buffer each time it needs to grow, to amortise copying cost.
+		for bufSize < sizeRequired {
+			bufSize = bufSize*2 + 1
+		}
+		newBuf := make([]byte, sizeRequired, bufSize)
+		copy(newBuf, buf)
+		buf = newBuf
+	}
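+	// encodeVarint writes backwards from the end of the buffer, so emit the
+	// value first, then the name.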
+	i := sizeRequired
+	i = encodeVarint(buf, i, valueNum)
+	i = encodeVarint(buf, i, nameNum)
+	return buf
+}
+
+// ScratchBuilder allows efficient construction of a Labels from scratch.
+type ScratchBuilder struct {
+	syms            *SymbolTable
+	nums            []int
+	add             []Label
+	output          Labels
+	overwriteBuffer []byte
+}
+
+// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
+// Warning: expensive; don't call in tight loops.
+func NewScratchBuilder(n int) ScratchBuilder {
+	return ScratchBuilder{syms: NewSymbolTable(), add: make([]Label, 0, n)}
+}
+
+// NewScratchBuilderWithSymbolTable creates a ScratchBuilder initialized for Labels with n entries.
+func NewScratchBuilderWithSymbolTable(s *SymbolTable, n int) ScratchBuilder {
+	return ScratchBuilder{syms: s, add: make([]Label, 0, n)}
+}
+
+func (b *ScratchBuilder) SetSymbolTable(s *SymbolTable) {
+	b.syms = s
+}
+
+func (b *ScratchBuilder) Reset() {
+	b.add = b.add[:0]
+	b.output = EmptyLabels()
+}
+
+// Add a name/value pair.
+// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+func (b *ScratchBuilder) Add(name, value string) {
+	b.add = append(b.add, Label{Name: name, Value: value})
+}
+
+// Add a name/value pair, using []byte instead of string to reduce memory allocations.
+// The values must remain live until Labels() is called.
+func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
+	b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)})
+}
+
+// Sort the labels added so far by name.
+func (b *ScratchBuilder) Sort() {
+	slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+}
+
+// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(l Labels) {
+	b.output = l
+}
+
+// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect.
+// Note: if you want them sorted, call Sort() first.
+func (b *ScratchBuilder) Labels() Labels {
+	if b.output.IsEmpty() {
+		var size int
+		size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums)
+		buf := make([]byte, size)
+		marshalNumbersToSizedBuffer(b.nums, buf)
+		b.output = Labels{syms: b.syms.nameTable, data: yoloString(buf)}
+	}
+	return b.output
+}
+
+// Write the newly-built Labels out to ls, reusing an internal buffer.
+// Callers must ensure that there are no other references to ls, or any strings fetched from it.
+func (b *ScratchBuilder) Overwrite(ls *Labels) {
+	var size int
+	size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums)
+	if size <= cap(b.overwriteBuffer) {
+		b.overwriteBuffer = b.overwriteBuffer[:size]
+	} else {
+		b.overwriteBuffer = make([]byte, size)
+	}
+	marshalNumbersToSizedBuffer(b.nums, b.overwriteBuffer)
+	ls.syms = b.syms.nameTable
+	ls.data = yoloString(b.overwriteBuffer)
+}
+
+// SizeOfLabels returns the approximate space required for n copies of a label.
+func SizeOfLabels(name, value string, n uint64) uint64 {
+	return uint64(len(name)+len(value)) + n*4 // Assuming most symbol-table entries are 2 bytes long.
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
new file mode 100644
index 0000000000000000000000000000000000000000..f49ed96f650e7df46d74e653d383eaf56db87869
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
@@ -0,0 +1,698 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build stringlabels
+
+package labels
+
+import (
+	"slices"
+	"strings"
+	"unsafe"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+// Labels is implemented by a single flat string holding name/value pairs.
+// Each name and value is preceded by its length in varint encoding.
+// Names are in order.
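+// For example (illustrative), {env="prod", job="api"} is stored as the single
+// string "\x03env\x04prod\x03job\x03api": each length byte is followed by the
+// name or value bytes, with entries sorted by name.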
+type Labels struct {
+	data string
+}
+
+func decodeSize(data string, index int) (int, int) {
+	// Fast-path for common case of a single byte, value 0..127.
+	b := data[index]
+	index++
+	if b < 0x80 {
+		return int(b), index
+	}
+	size := int(b & 0x7F)
+	for shift := uint(7); ; shift += 7 {
+		// Just panic if we go off the end of data, since all Labels strings are
+		// constructed internally and malformed data indicates a bug or memory corruption.
+		b := data[index]
+		index++
+		size |= int(b&0x7F) << shift
+		if b < 0x80 {
+			break
+		}
+	}
+	return size, index
+}
+
+func decodeString(data string, index int) (string, int) {
+	var size int
+	size, index = decodeSize(data, index)
+	return data[index : index+size], index + size
+}
+
+// Bytes returns ls as a byte slice.
+// It uses non-printing characters and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+	if cap(buf) < len(ls.data) {
+		buf = make([]byte, len(ls.data))
+	} else {
+		buf = buf[:len(ls.data)]
+	}
+	copy(buf, ls.data)
+	return buf
+}
+
+// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted.
+func (ls Labels) IsZero() bool {
+	return len(ls.data) == 0
+}
+
+// MatchLabels returns a subset of Labels based on the 'on' boolean: if 'on' is
+// true, it returns the labels whose names are in 'names'; if 'on' is false, it
+// returns the inverse (additionally dropping the metric name).
+// TODO: this is only used when printing an error message.
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+	b := NewBuilder(ls)
+	if on {
+		b.Keep(names...)
+	} else {
+		b.Del(MetricName)
+		b.Del(names...)
+	}
+	return b.Labels()
+}
+
+// Hash returns a hash value for the label set.
+// Note: the result is not guaranteed to be consistent across different runs of Prometheus.
+func (ls Labels) Hash() uint64 {
+	return xxhash.Sum64(yoloBytes(ls.data))
+}
+
+// HashForLabels returns a hash value for the labels matching the provided names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
+	b = b[:0]
+	j := 0
+	for i := 0; i < len(ls.data); {
+		var name, value string
+		name, i = decodeString(ls.data, i)
+		value, i = decodeString(ls.data, i)
+		for j < len(names) && names[j] < name {
+			j++
+		}
+		if j == len(names) {
+			break
+		}
+		if name == names[j] {
+			b = append(b, name...)
+			b = append(b, sep)
+			b = append(b, value...)
+			b = append(b, sep)
+		}
+	}
+
+	return xxhash.Sum64(b), b
+}
+
+// HashWithoutLabels returns a hash value for all labels except those matching
+// the provided names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
+	b = b[:0]
+	j := 0
+	for i := 0; i < len(ls.data); {
+		var name, value string
+		name, i = decodeString(ls.data, i)
+		value, i = decodeString(ls.data, i)
+		for j < len(names) && names[j] < name {
+			j++
+		}
+		if name == MetricName || (j < len(names) && name == names[j]) {
+			continue
+		}
+		b = append(b, name...)
+		b = append(b, sep)
+		b = append(b, value...)
+		b = append(b, sep)
+	}
+	return xxhash.Sum64(b), b
+}
+
+// BytesWithLabels is just like Bytes(), but only for labels matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
+	b := buf[:0]
+	j := 0
+	for pos := 0; pos < len(ls.data); {
+		lName, newPos := decodeString(ls.data, pos)
+		_, newPos = decodeString(ls.data, newPos)
+		for j < len(names) && names[j] < lName {
+			j++
+		}
+		if j == len(names) {
+			break
+		}
+		if lName == names[j] {
+			b = append(b, ls.data[pos:newPos]...)
+		}
+		pos = newPos
+	}
+	return b
+}
+
+// BytesWithoutLabels is just like Bytes(), but only for labels not matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
+	b := buf[:0]
+	j := 0
+	for pos := 0; pos < len(ls.data); {
+		lName, newPos := decodeString(ls.data, pos)
+		_, newPos = decodeString(ls.data, newPos)
+		for j < len(names) && names[j] < lName {
+			j++
+		}
+		if j == len(names) || lName != names[j] {
+			b = append(b, ls.data[pos:newPos]...)
+		}
+		pos = newPos
+	}
+	return b
+}
+
+// Copy returns a copy of the labels.
+func (ls Labels) Copy() Labels {
+	return Labels{data: strings.Clone(ls.data)}
+}
+
+// Get returns the value for the label with the given name.
+// Returns an empty string if the label doesn't exist.
+func (ls Labels) Get(name string) string {
+	if name == "" { // Avoid crash in loop if someone asks for "".
+		return "" // Prometheus does not store blank label names.
+	}
+	for i := 0; i < len(ls.data); {
+		var size int
+		size, i = decodeSize(ls.data, i)
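+		// Compare the first byte before slicing out the full name; since names
+		// are sorted we can also stop as soon as we have passed name[0].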
+		if ls.data[i] == name[0] {
+			lName := ls.data[i : i+size]
+			i += size
+			if lName == name {
+				lValue, _ := decodeString(ls.data, i)
+				return lValue
+			}
+		} else {
+			if ls.data[i] > name[0] { // Stop looking if we've gone past.
+				break
+			}
+			i += size
+		}
+		size, i = decodeSize(ls.data, i)
+		i += size
+	}
+	return ""
+}
+
+// Has returns true if the label with the given name is present.
+func (ls Labels) Has(name string) bool {
+	if name == "" { // Avoid crash in loop if someone asks for "".
+		return false // Prometheus does not store blank label names.
+	}
+	for i := 0; i < len(ls.data); {
+		var size int
+		size, i = decodeSize(ls.data, i)
+		if ls.data[i] == name[0] {
+			lName := ls.data[i : i+size]
+			i += size
+			if lName == name {
+				return true
+			}
+		} else {
+			if ls.data[i] > name[0] { // Stop looking if we've gone past.
+				break
+			}
+			i += size
+		}
+		size, i = decodeSize(ls.data, i)
+		i += size
+	}
+	return false
+}
+
+// HasDuplicateLabelNames returns whether ls has duplicate label names.
+// It assumes that the labelset is sorted.
+func (ls Labels) HasDuplicateLabelNames() (string, bool) {
+	var lName, prevName string
+	for i := 0; i < len(ls.data); {
+		lName, i = decodeString(ls.data, i)
+		_, i = decodeString(ls.data, i)
+		if lName == prevName {
+			return lName, true
+		}
+		prevName = lName
+	}
+	return "", false
+}
+
+// WithoutEmpty returns the labelset without empty labels.
+// May return the same labelset.
+func (ls Labels) WithoutEmpty() Labels {
+	for pos := 0; pos < len(ls.data); {
+		_, newPos := decodeString(ls.data, pos)
+		lValue, newPos := decodeString(ls.data, newPos)
+		if lValue != "" {
+			pos = newPos
+			continue
+		}
+		// Do not copy the slice until it's necessary.
+		// TODO: could optimise the case where all blanks are at the end.
+		// Note: we size the new buffer on the assumption there is exactly one blank value.
+		buf := make([]byte, pos, pos+(len(ls.data)-newPos))
+		copy(buf, ls.data[:pos]) // copy the initial non-blank labels
+		pos = newPos             // move past the first blank value
+		for pos < len(ls.data) {
+			var newPos int
+			_, newPos = decodeString(ls.data, pos)
+			lValue, newPos = decodeString(ls.data, newPos)
+			if lValue != "" {
+				buf = append(buf, ls.data[pos:newPos]...)
+			}
+			pos = newPos
+		}
+		return Labels{data: yoloString(buf)}
+	}
+	return ls
+}
+
+// Equal returns whether the two label sets are equal.
+func Equal(ls, o Labels) bool {
+	return ls.data == o.data
+}
+
+// EmptyLabels returns an empty Labels value, for convenience.
+func EmptyLabels() Labels {
+	return Labels{}
+}
+
+func yoloBytes(s string) []byte {
+	return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// New returns a sorted Labels from the given labels.
+// The caller has to guarantee that all label names are unique.
+func New(ls ...Label) Labels {
+	slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+	size := labelsSize(ls)
+	buf := make([]byte, size)
+	marshalLabelsToSizedBuffer(ls, buf)
+	return Labels{data: yoloString(buf)}
+}
+
+// FromStrings creates new labels from pairs of strings.
+func FromStrings(ss ...string) Labels {
+	if len(ss)%2 != 0 {
+		panic("invalid number of strings")
+	}
+	ls := make([]Label, 0, len(ss)/2)
+	for i := 0; i < len(ss); i += 2 {
+		ls = append(ls, Label{Name: ss[i], Value: ss[i+1]})
+	}
+
+	return New(ls...)
+}
+
+// Compare compares the two label sets.
+// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
+func Compare(a, b Labels) int {
+	// Find the first byte in the string where a and b differ.
+	shorter, longer := a.data, b.data
+	if len(b.data) < len(a.data) {
+		shorter, longer = b.data, a.data
+	}
+	i := 0
+	// First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned.
+	sp := unsafe.Pointer(unsafe.StringData(shorter))
+	lp := unsafe.Pointer(unsafe.StringData(longer))
+	for ; i < len(shorter)-8; i += 8 {
+		if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) {
+			break
+		}
+	}
+	// Now go 1 byte at a time.
+	for ; i < len(shorter); i++ {
+		if shorter[i] != longer[i] {
+			break
+		}
+	}
+	if i == len(shorter) {
+		// One Labels was a prefix of the other; the set with fewer labels compares lower.
+		return len(a.data) - len(b.data)
+	}
+
+	// Now we know that there is some difference before the end of a and b.
+	// Go back through the fields and find which field that difference is in.
+	firstCharDifferent, i := i, 0
+	size, nextI := decodeSize(a.data, i)
+	for nextI+size <= firstCharDifferent {
+		i = nextI + size
+		size, nextI = decodeSize(a.data, i)
+	}
+	// Difference is inside this entry.
+	aStr, _ := decodeString(a.data, i)
+	bStr, _ := decodeString(b.data, i)
+	if aStr < bStr {
+		return -1
+	}
+	return +1
+}
+
+// CopyFrom copies labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+func (ls *Labels) CopyFrom(b Labels) {
+	ls.data = b.data // strings are immutable
+}
+
+// IsEmpty returns true if ls represents an empty set of labels.
+func (ls Labels) IsEmpty() bool {
+	return len(ls.data) == 0
+}
+
+// Len returns the number of labels; it is relatively slow.
+func (ls Labels) Len() int {
+	count := 0
+	for i := 0; i < len(ls.data); {
+		var size int
+		size, i = decodeSize(ls.data, i)
+		i += size
+		size, i = decodeSize(ls.data, i)
+		i += size
+		count++
+	}
+	return count
+}
+
+// Range calls f on each label.
+func (ls Labels) Range(f func(l Label)) {
+	for i := 0; i < len(ls.data); {
+		var lName, lValue string
+		lName, i = decodeString(ls.data, i)
+		lValue, i = decodeString(ls.data, i)
+		f(Label{Name: lName, Value: lValue})
+	}
+}
+
+// Validate calls f on each label. If f returns a non-nil error, then it returns that error, cancelling the iteration.
+func (ls Labels) Validate(f func(l Label) error) error {
+	for i := 0; i < len(ls.data); {
+		var lName, lValue string
+		lName, i = decodeString(ls.data, i)
+		lValue, i = decodeString(ls.data, i)
+		err := f(Label{Name: lName, Value: lValue})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// DropMetricName returns Labels with "__name__" removed.
+func (ls Labels) DropMetricName() Labels {
+	for i := 0; i < len(ls.data); {
+		lName, i2 := decodeString(ls.data, i)
+		size, i2 := decodeSize(ls.data, i2)
+		i2 += size
+		if lName == MetricName {
+			if i == 0 { // Make common case fast with no allocations.
+				ls.data = ls.data[i2:]
+			} else {
+				ls.data = ls.data[:i] + ls.data[i2:]
+			}
+			break
+		} else if lName[0] > MetricName[0] { // Stop looking if we've gone past.
+			break
+		}
+		i = i2
+	}
+	return ls
+}
+
+// InternStrings is a no-op because it would only save memory when the whole set of labels is identical.
+func (ls *Labels) InternStrings(intern func(string) string) {
+}
+
+// ReleaseStrings is a no-op for the same reason as InternStrings.
+func (ls Labels) ReleaseStrings(release func(string)) {
+}
+
+// Builder allows modifying Labels.
+type Builder struct {
+	base Labels
+	del  []string
+	add  []Label
+}
+
+// Reset clears all current state for the builder.
+func (b *Builder) Reset(base Labels) {
+	b.base = base
+	b.del = b.del[:0]
+	b.add = b.add[:0]
+	b.base.Range(func(l Label) {
+		if l.Value == "" {
+			b.del = append(b.del, l.Name)
+		}
+	})
+}
+
+// Labels returns the labels from the builder.
+// If no modifications were made, the original labels are returned.
+func (b *Builder) Labels() Labels {
+	if len(b.del) == 0 && len(b.add) == 0 {
+		return b.base
+	}
+
+	slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+	slices.Sort(b.del)
+	a, d := 0, 0
+
+	bufSize := len(b.base.data) + labelsSize(b.add)
+	buf := make([]byte, 0, bufSize)
+	for pos := 0; pos < len(b.base.data); {
+		oldPos := pos
+		var lName string
+		lName, pos = decodeString(b.base.data, pos)
+		_, pos = decodeString(b.base.data, pos)
+		for d < len(b.del) && b.del[d] < lName {
+			d++
+		}
+		if d < len(b.del) && b.del[d] == lName {
+			continue // This label has been deleted.
+		}
+		for ; a < len(b.add) && b.add[a].Name < lName; a++ {
+			buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set.
+		}
+		if a < len(b.add) && b.add[a].Name == lName {
+			buf = appendLabelTo(buf, &b.add[a])
+			a++
+			continue // This label has been replaced.
+		}
+		buf = append(buf, b.base.data[oldPos:pos]...)
+	}
+	// We have come to the end of the base set; add any remaining labels.
+	for ; a < len(b.add); a++ {
+		buf = appendLabelTo(buf, &b.add[a])
+	}
+	return Labels{data: yoloString(buf)}
+}
+
+func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int {
+	i := len(data)
+	for index := len(lbls) - 1; index >= 0; index-- {
+		size := marshalLabelToSizedBuffer(&lbls[index], data[:i])
+		i -= size
+	}
+	return len(data) - i
+}
+
+func marshalLabelToSizedBuffer(m *Label, data []byte) int {
+	i := len(data)
+	i -= len(m.Value)
+	copy(data[i:], m.Value)
+	i = encodeSize(data, i, len(m.Value))
+	i -= len(m.Name)
+	copy(data[i:], m.Name)
+	i = encodeSize(data, i, len(m.Name))
+	return len(data) - i
+}
+
+func sizeVarint(x uint64) (n int) {
+	// Most common case first
+	if x < 1<<7 {
+		return 1
+	}
+	if x >= 1<<56 {
+		return 9
+	}
+	if x >= 1<<28 {
+		x >>= 28
+		n = 4
+	}
+	if x >= 1<<14 {
+		x >>= 14
+		n += 2
+	}
+	if x >= 1<<7 {
+		n++
+	}
+	return n + 1
+}
+
+func encodeVarint(data []byte, offset int, v uint64) int {
+	offset -= sizeVarint(v)
+	base := offset
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return base
+}
+
+// Special code for the common case that a size is less than 128
+func encodeSize(data []byte, offset, v int) int {
+	if v < 1<<7 {
+		offset--
+		data[offset] = uint8(v)
+		return offset
+	}
+	return encodeVarint(data, offset, uint64(v))
+}
+
+func labelsSize(lbls []Label) (n int) {
+	// we just encode name/value/name/value, without any extra tags or length bytes
+	for _, e := range lbls {
+		n += labelSize(&e)
+	}
+	return n
+}
+
+func labelSize(m *Label) (n int) {
+	// strings are encoded as length followed by contents.
+	l := len(m.Name)
+	n += l + sizeVarint(uint64(l))
+	l = len(m.Value)
+	n += l + sizeVarint(uint64(l))
+	return n
+}
+
+func appendLabelTo(buf []byte, m *Label) []byte {
+	size := labelSize(m)
+	sizeRequired := len(buf) + size
+	if cap(buf) >= sizeRequired {
+		buf = buf[:sizeRequired]
+	} else {
+		bufSize := cap(buf)
+		// Double size of buffer each time it needs to grow, to amortise copying cost.
+		for bufSize < sizeRequired {
+			bufSize = bufSize*2 + 1
+		}
+		newBuf := make([]byte, sizeRequired, bufSize)
+		copy(newBuf, buf)
+		buf = newBuf
+	}
+	marshalLabelToSizedBuffer(m, buf)
+	return buf
+}
+
+// ScratchBuilder allows efficient construction of a Labels from scratch.
+type ScratchBuilder struct {
+	add             []Label
+	output          Labels
+	overwriteBuffer []byte
+}
+
+// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
+func NewScratchBuilder(n int) ScratchBuilder {
+	return ScratchBuilder{add: make([]Label, 0, n)}
+}
+
+func (b *ScratchBuilder) Reset() {
+	b.add = b.add[:0]
+	b.output = EmptyLabels()
+}
+
+// Add a name/value pair.
+// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+func (b *ScratchBuilder) Add(name, value string) {
+	b.add = append(b.add, Label{Name: name, Value: value})
+}
+
+// Add a name/value pair, using []byte instead of string to reduce memory allocations.
+// The values must remain live until Labels() is called.
+func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
+	b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)})
+}
+
+// Sort the labels added so far by name.
+func (b *ScratchBuilder) Sort() {
+	slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+}
+
+// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(l Labels) {
+	b.output = l
+}
+
+// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect.
+// Note: if you want them sorted, call Sort() first.
+func (b *ScratchBuilder) Labels() Labels {
+	if b.output.IsEmpty() {
+		size := labelsSize(b.add)
+		buf := make([]byte, size)
+		marshalLabelsToSizedBuffer(b.add, buf)
+		b.output = Labels{data: yoloString(buf)}
+	}
+	return b.output
+}
+
+// Write the newly-built Labels out to ls, reusing an internal buffer.
+// Callers must ensure that there are no other references to ls, or any strings fetched from it.
+func (b *ScratchBuilder) Overwrite(ls *Labels) {
+	size := labelsSize(b.add)
+	if size <= cap(b.overwriteBuffer) {
+		b.overwriteBuffer = b.overwriteBuffer[:size]
+	} else {
+		b.overwriteBuffer = make([]byte, size)
+	}
+	marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer)
+	ls.data = yoloString(b.overwriteBuffer)
+}
+
+// SymbolTable is a no-op here, provided for API parity with dedupelabels.
+type SymbolTable struct{}
+
+func NewSymbolTable() *SymbolTable { return nil }
+
+func (t *SymbolTable) Len() int { return 0 }
+
+// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels.
+func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder {
+	return NewBuilder(EmptyLabels())
+}
+
+// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels.
+func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder {
+	return NewScratchBuilder(n)
+}
+
+func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) {
+	// no-op
+}
+
+// SizeOfLabels returns the approximate space required for n copies of a label.
+func SizeOfLabels(name, value string, n uint64) uint64 {
+	return uint64(labelSize(&Label{Name: name, Value: value})) * n
+}
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go
similarity index 61%
rename from vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go
rename to vendor/github.com/prometheus/prometheus/model/labels/matcher.go
index 88d463233a02ea95ca81af08fda93d40624511ff..a09c838e3f86376245364d3e7d4d0609607904aa 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go
@@ -14,7 +14,8 @@
 package labels
 
 import (
-	"fmt"
+	"bytes"
+	"strconv"
 )
 
 // MatchType is an enum for label matching types.
@@ -28,17 +29,18 @@ const (
 	MatchNotRegexp
 )
 
+var matchTypeToStr = [...]string{
+	MatchEqual:     "=",
+	MatchNotEqual:  "!=",
+	MatchRegexp:    "=~",
+	MatchNotRegexp: "!~",
+}
+
 func (m MatchType) String() string {
-	typeToStr := map[MatchType]string{
-		MatchEqual:     "=",
-		MatchNotEqual:  "!=",
-		MatchRegexp:    "=~",
-		MatchNotRegexp: "!~",
-	}
-	if str, ok := typeToStr[m]; ok {
-		return str
+	if m < MatchEqual || m > MatchNotRegexp {
+		panic("unknown match type")
 	}
-	panic("unknown match type")
+	return matchTypeToStr[m]
 }
 
 // Matcher models the matching of a label.
@@ -77,7 +79,29 @@ func MustNewMatcher(mt MatchType, name, val string) *Matcher {
 }
 
 func (m *Matcher) String() string {
-	return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
+	// Start with a stack-allocated buffer large enough to cover most needs.
+	var bytea [1024]byte
+	b := bytes.NewBuffer(bytea[:0])
+
+	if m.shouldQuoteName() {
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name))
+	} else {
+		b.WriteString(m.Name)
+	}
+	b.WriteString(m.Type.String())
+	b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value))
+
+	return b.String()
+}
+
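+// shouldQuoteName reports whether the name must be quoted in String(): any name
+// that is not a classic label name ([a-zA-Z_][a-zA-Z0-9_]*), including the
+// empty name, needs quoting.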
+func (m *Matcher) shouldQuoteName() bool {
+	for i, c := range m.Name {
+		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') {
+			continue
+		}
+		return true
+	}
+	return len(m.Name) == 0
 }
 
 // Matches returns whether the matcher matches the given string value.
@@ -117,3 +141,30 @@ func (m *Matcher) GetRegexString() string {
 	}
 	return m.re.GetRegexString()
 }
+
+// SetMatches returns a set of equality matchers for the current regex matcher, if possible.
+// For example, the regexp `a(b|f)` returns "ab" and "af".
+// Returns nil if the regexp cannot be replaced by equality matchers alone.
+func (m *Matcher) SetMatches() []string {
+	if m.re == nil {
+		return nil
+	}
+	return m.re.SetMatches()
+}
+
+// Prefix returns the required prefix of the value to match, if possible.
+// It will be empty if it's an equality matcher or if the prefix can't be determined.
+func (m *Matcher) Prefix() string {
+	if m.re == nil {
+		return ""
+	}
+	return m.re.prefix
+}
+
+// IsRegexOptimized returns whether regex is optimized.
+func (m *Matcher) IsRegexOptimized() bool {
+	if m.re == nil {
+		return false
+	}
+	return m.re.IsOptimized()
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf6c9158e971062b805f766502d11982c95612e9
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
@@ -0,0 +1,1110 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"slices"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/grafana/regexp"
+	"github.com/grafana/regexp/syntax"
+	"golang.org/x/text/unicode/norm"
+)
+
+const (
+	maxSetMatches = 256
+
+	// The minimum number of alternate values a regex should have to trigger
+	// the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map
+	// to match values instead of iterating over a list. This value has
+	// been computed running BenchmarkOptimizeEqualStringMatchers.
+	minEqualMultiStringMatcherMapThreshold = 16
+)
+
+type FastRegexMatcher struct {
+	// Under some conditions, re is nil because the expression is never parsed.
+	// We store the original string to be able to return it in GetRegexString().
+	reString string
+	re       *regexp.Regexp
+
+	setMatches    []string
+	stringMatcher StringMatcher
+	prefix        string
+	suffix        string
+	contains      []string
+
+	// matchString is the "compiled" function to run by MatchString().
+	matchString func(string) bool
+}
+
+func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
+	m := &FastRegexMatcher{
+		reString: v,
+	}
+
+	m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v)
+	if m.stringMatcher != nil {
+		// If we already have a string matcher, we don't need to parse the regex
+		// or compile the matchString function. This also avoids the behavior in
+		// compileMatchStringFunction where it prefers to use setMatches when
+		// available, even if the string matcher is faster.
+		m.matchString = m.stringMatcher.Matches
+	} else {
+		parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL)
+		if err != nil {
+			return nil, err
+		}
+		// Simplify the syntax tree to run faster.
+		parsed = parsed.Simplify()
+		m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$")
+		if err != nil {
+			return nil, err
+		}
+		if parsed.Op == syntax.OpConcat {
+			m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
+		}
+		if matches, caseSensitive := findSetMatches(parsed); caseSensitive {
+			m.setMatches = matches
+		}
+		m.stringMatcher = stringMatcherFromRegexp(parsed)
+		m.matchString = m.compileMatchStringFunction()
+	}
+
+	return m, nil
+}
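+// Illustrative usage: for NewFastRegexMatcher("api|web"), the alternation is
+// recognised by optimizeAlternatingLiterals, so MatchString compares against
+// the two literals and the regexp engine is never invoked.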
+
+// compileMatchStringFunction returns the function to run by MatchString().
+func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
+	// If the only optimization available is the string matcher, then we can just run it.
+	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && len(m.contains) == 0 && m.stringMatcher != nil {
+		return m.stringMatcher.Matches
+	}
+
+	return func(s string) bool {
+		if len(m.setMatches) != 0 {
+			for _, match := range m.setMatches {
+				if match == s {
+					return true
+				}
+			}
+			return false
+		}
+		if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
+			return false
+		}
+		if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
+			return false
+		}
+		if len(m.contains) > 0 && !containsInOrder(s, m.contains) {
+			return false
+		}
+		if m.stringMatcher != nil {
+			return m.stringMatcher.Matches(s)
+		}
+		return m.re.MatchString(s)
+	}
+}
+
+// IsOptimized returns true if any fast-path optimization is applied to the
+// regex matcher.
+func (m *FastRegexMatcher) IsOptimized() bool {
+	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || len(m.contains) > 0
+}
+
+// findSetMatches extracts equality matches from a regexp.
+// Returns nil if the regexp cannot be replaced by equality matchers alone, or if it
+// contains a mix of case-sensitive and case-insensitive matchers.
+func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) {
+	clearBeginEndText(re)
+
+	return findSetMatchesInternal(re, "")
+}
+
+func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) {
+	switch re.Op {
+	case syntax.OpBeginText:
+		// Correctly handling the begin text operator inside a regex is tricky,
+		// so in this case we fallback to the regex engine.
+		return nil, false
+	case syntax.OpEndText:
+		// Correctly handling the end text operator inside a regex is tricky,
+		// so in this case we fallback to the regex engine.
+		return nil, false
+	case syntax.OpLiteral:
+		return []string{base + string(re.Rune)}, isCaseSensitive(re)
+	case syntax.OpEmptyMatch:
+		if base != "" {
+			return []string{base}, isCaseSensitive(re)
+		}
+	case syntax.OpAlternate:
+		return findSetMatchesFromAlternate(re, base)
+	case syntax.OpCapture:
+		clearCapture(re)
+		return findSetMatchesInternal(re, base)
+	case syntax.OpConcat:
+		return findSetMatchesFromConcat(re, base)
+	case syntax.OpCharClass:
+		if len(re.Rune)%2 != 0 {
+			return nil, false
+		}
+		var matches []string
+		var totalSet int
+		for i := 0; i+1 < len(re.Rune); i += 2 {
+			totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1
+		}
+		// Limit the total number of characters that can be used to create matches.
+		// In some cases, such as the negation [^0-9], a huge number of possibilities exists,
+		// creating thousands of possible matches, at which point we're better off using the regexp engine.
+		if totalSet > maxSetMatches {
+			return nil, false
+		}
+		for i := 0; i+1 < len(re.Rune); i += 2 {
+			lo, hi := re.Rune[i], re.Rune[i+1]
+			for c := lo; c <= hi; c++ {
+				matches = append(matches, base+string(c))
+			}
+		}
+		return matches, isCaseSensitive(re)
+	default:
+		return nil, false
+	}
+	return nil, false
+}
+
+func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
+	if len(re.Sub) == 0 {
+		return nil, false
+	}
+	clearCapture(re.Sub...)
+
+	matches = []string{base}
+
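+	// Build the cross product incrementally: each sub-expression expands every
+	// match accumulated so far, bailing out once maxSetMatches would be exceeded.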
+	for i := 0; i < len(re.Sub); i++ {
+		var newMatches []string
+		for j, b := range matches {
+			m, caseSensitive := findSetMatchesInternal(re.Sub[i], b)
+			if m == nil {
+				return nil, false
+			}
+			if tooManyMatches(newMatches, m...) {
+				return nil, false
+			}
+
+			// All matches must have the same case sensitivity. If it's the first set of matches
+			// returned, we store its sensitivity as the expected case, and then we'll check all
+			// other ones.
+			if i == 0 && j == 0 {
+				matchesCaseSensitive = caseSensitive
+			}
+			if matchesCaseSensitive != caseSensitive {
+				return nil, false
+			}
+
+			newMatches = append(newMatches, m...)
+		}
+		matches = newMatches
+	}
+
+	return matches, matchesCaseSensitive
+}
+
+func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
+	for i, sub := range re.Sub {
+		found, caseSensitive := findSetMatchesInternal(sub, base)
+		if found == nil {
+			return nil, false
+		}
+		if tooManyMatches(matches, found...) {
+			return nil, false
+		}
+
+		// All matches must have the same case sensitivity. If it's the first set of matches
+		// returned, we store its sensitivity as the expected case, and then we'll check all
+		// other ones.
+		if i == 0 {
+			matchesCaseSensitive = caseSensitive
+		}
+		if matchesCaseSensitive != caseSensitive {
+			return nil, false
+		}
+
+		matches = append(matches, found...)
+	}
+
+	return matches, matchesCaseSensitive
+}
+
+// clearCapture removes capture operations, as they are not used for matching.
+func clearCapture(regs ...*syntax.Regexp) {
+	for _, r := range regs {
+		// Iterate on the regexp because capture groups could be nested.
+		for r.Op == syntax.OpCapture {
+			*r = *r.Sub[0]
+		}
+	}
+}
+
+// clearBeginEndText removes the begin and end text from the regexp. Prometheus regexps are anchored to the beginning and end of the string.
+func clearBeginEndText(re *syntax.Regexp) {
+	// Do not clear begin/end text from an alternate operator because it could
+	// change the actual regexp properties.
+	if re.Op == syntax.OpAlternate {
+		return
+	}
+
+	if len(re.Sub) == 0 {
+		return
+	}
+	if len(re.Sub) == 1 {
+		if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText {
+			// We need to remove this element. Since it's the only one, we convert into a matcher of an empty string.
+			// OpEmptyMatch is regexp's nop operator.
+			re.Op = syntax.OpEmptyMatch
+			re.Sub = nil
+			return
+		}
+	}
+	if re.Sub[0].Op == syntax.OpBeginText {
+		re.Sub = re.Sub[1:]
+	}
+	if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText {
+		re.Sub = re.Sub[:len(re.Sub)-1]
+	}
+}
+
+// isCaseInsensitive tells whether a regexp is case-insensitive.
+// The flag should be checked at each level of the syntax tree.
+func isCaseInsensitive(reg *syntax.Regexp) bool {
+	return (reg.Flags & syntax.FoldCase) != 0
+}
+
+// isCaseSensitive tells whether a regexp is case-sensitive.
+// The flag should be checked at each level of the syntax tree.
+func isCaseSensitive(reg *syntax.Regexp) bool {
+	return !isCaseInsensitive(reg)
+}
+
+// tooManyMatches guards against creating too many set matches.
+func tooManyMatches(matches []string, added ...string) bool {
+	return len(matches)+len(added) > maxSetMatches
+}
+
+func (m *FastRegexMatcher) MatchString(s string) bool {
+	return m.matchString(s)
+}
+
+func (m *FastRegexMatcher) SetMatches() []string {
+	// IMPORTANT: always return a copy, otherwise if the caller manipulates this slice
+	// it will also be manipulated in the cached FastRegexMatcher instance.
+	return slices.Clone(m.setMatches)
+}
+
+func (m *FastRegexMatcher) GetRegexString() string {
+	return m.reString
+}
+
+// optimizeAlternatingLiterals optimizes a regex of the form
+//
+//	`literal1|literal2|literal3|...`
+//
+// It returns an optimized StringMatcher, or nil if the regex cannot be optimized
+// in this way, and a list of set matches up to maxSetMatches.
+func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
+	if len(s) == 0 {
+		return emptyStringMatcher{}, nil
+	}
+
+	estimatedAlternates := strings.Count(s, "|") + 1
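+	// Note: this count is only an estimate; a '|' inside an escape or character
+	// class makes QuoteMeta differ from the sub-match below, so we bail out
+	// rather than mis-split.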
+
+	// If there are no alternates, check if the string is a literal
+	if estimatedAlternates == 1 {
+		if regexp.QuoteMeta(s) == s {
+			return &equalStringMatcher{s: s, caseSensitive: true}, []string{s}
+		}
+		return nil, nil
+	}
+
+	multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0)
+
+	for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
+		// Split the string into the next literal and the remainder
+		subMatch := s[:end]
+		s = s[end+1:]
+
+		// break if any of the submatches are not literals
+		if regexp.QuoteMeta(subMatch) != subMatch {
+			return nil, nil
+		}
+
+		multiMatcher.add(subMatch)
+	}
+
+	// break if the remainder is not a literal
+	if regexp.QuoteMeta(s) != s {
+		return nil, nil
+	}
+	multiMatcher.add(s)
+
+	return multiMatcher, multiMatcher.setMatches()
+}
+
+// optimizeConcatRegex returns literal prefix/suffix text that can be safely
+// checked against the label value before running the regexp matcher.
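+// For example (illustrative), api_.*_total yields prefix "api_" and suffix
+// "_total", while foo.*bar.*baz additionally records "bar" in contains.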
+func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) {
+	sub := r.Sub
+	clearCapture(sub...)
+
+	// We can safely remove begin and end text matchers respectively
+	// at the beginning and end of the regexp.
+	if len(sub) > 0 && sub[0].Op == syntax.OpBeginText {
+		sub = sub[1:]
+	}
+	if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText {
+		sub = sub[:len(sub)-1]
+	}
+
+	if len(sub) == 0 {
+		return
+	}
+
+	// Given Prometheus regex matchers are always anchored to the begin/end
+	// of the text, if the first/last operations are literals, we can safely
+	// treat them as prefix/suffix.
+	if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 {
+		prefix = string(sub[0].Rune)
+	}
+	if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 {
+		suffix = string(sub[last].Rune)
+	}
+
+	// If the regexp contains any literal which is not a prefix/suffix, we keep
+	// track of all the case-sensitive ones.
+	for i := 1; i < len(sub)-1; i++ {
+		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
+			contains = append(contains, string(sub[i].Rune))
+		}
+	}
+
+	return
+}
+
+// StringMatcher is a matcher that matches a string in place of a regular expression.
+type StringMatcher interface {
+	Matches(s string) bool
+}
+
+// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher.
+// It returns nil if the regexp is not supported.
+func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
+	clearBeginEndText(re)
+
+	m := stringMatcherFromRegexpInternal(re)
+	m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
+
+	return m
+}
+
+func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
+	clearCapture(re)
+
+	switch re.Op {
+	case syntax.OpBeginText:
+		// Correctly handling the begin text operator inside a regex is tricky,
+		// so in this case we fallback to the regex engine.
+		return nil
+	case syntax.OpEndText:
+		// Correctly handling the end text operator inside a regex is tricky,
+		// so in this case we fallback to the regex engine.
+		return nil
+	case syntax.OpPlus:
+		if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
+			return nil
+		}
+		return &anyNonEmptyStringMatcher{
+			matchNL: re.Sub[0].Op == syntax.OpAnyChar,
+		}
+	case syntax.OpStar:
+		if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
+			return nil
+		}
+
+		// If the newline is valid, then this matcher literally matches any string (even an empty one).
+		if re.Sub[0].Op == syntax.OpAnyChar {
+			return trueMatcher{}
+		}
+
+		// Any string is fine (including an empty one), as long as it doesn't contain any newline.
+		return anyStringWithoutNewlineMatcher{}
+	case syntax.OpQuest:
+		// Only optimize for ".?".
+		if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) {
+			return nil
+		}
+
+		return &zeroOrOneCharacterStringMatcher{
+			matchNL: re.Sub[0].Op == syntax.OpAnyChar,
+		}
+	case syntax.OpEmptyMatch:
+		return emptyStringMatcher{}
+
+	case syntax.OpLiteral:
+		return &equalStringMatcher{
+			s:             string(re.Rune),
+			caseSensitive: !isCaseInsensitive(re),
+		}
+	case syntax.OpAlternate:
+		or := make([]StringMatcher, 0, len(re.Sub))
+		for _, sub := range re.Sub {
+			m := stringMatcherFromRegexpInternal(sub)
+			if m == nil {
+				return nil
+			}
+			or = append(or, m)
+		}
+		return orStringMatcher(or)
+	case syntax.OpConcat:
+		clearCapture(re.Sub...)
+
+		if len(re.Sub) == 0 {
+			return emptyStringMatcher{}
+		}
+		if len(re.Sub) == 1 {
+			return stringMatcherFromRegexpInternal(re.Sub[0])
+		}
+
+		var left, right StringMatcher
+
+		// Check whether the first and/or last sub-expressions are any-matchers.
+		if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest {
+			left = stringMatcherFromRegexpInternal(re.Sub[0])
+			if left == nil {
+				return nil
+			}
+			re.Sub = re.Sub[1:]
+		}
+		if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest {
+			right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1])
+			if right == nil {
+				return nil
+			}
+			re.Sub = re.Sub[:len(re.Sub)-1]
+		}
+
+		matches, matchesCaseSensitive := findSetMatchesInternal(re, "")
+
+		if len(matches) == 0 && len(re.Sub) == 2 {
+			// We have not found fixed set matches. Look for other known cases
+			// that we can optimize.
+			switch {
+			// Prefix is literal.
+			case right == nil && re.Sub[0].Op == syntax.OpLiteral:
+				right = stringMatcherFromRegexpInternal(re.Sub[1])
+				if right != nil {
+					matches = []string{string(re.Sub[0].Rune)}
+					matchesCaseSensitive = !isCaseInsensitive(re.Sub[0])
+				}
+
+			// Suffix is literal.
+			case left == nil && re.Sub[1].Op == syntax.OpLiteral:
+				left = stringMatcherFromRegexpInternal(re.Sub[0])
+				if left != nil {
+					matches = []string{string(re.Sub[1].Rune)}
+					matchesCaseSensitive = !isCaseInsensitive(re.Sub[1])
+				}
+			}
+		}
+
+		// Ensure we've found some literals to match (optionally with a left and/or right matcher).
+		// If not, then this optimization doesn't trigger.
+		if len(matches) == 0 {
+			return nil
+		}
+
+		// Use the right (and best) matcher based on what we've found.
+		switch {
+		// No left and right matchers (only fixed set matches).
+		case left == nil && right == nil:
+			// If there are no any-matchers on either side, it's a concatenation of literals.
+			or := make([]StringMatcher, 0, len(matches))
+			for _, match := range matches {
+				or = append(or, &equalStringMatcher{
+					s:             match,
+					caseSensitive: matchesCaseSensitive,
+				})
+			}
+			return orStringMatcher(or)
+
+		// Right matcher with 1 fixed set match.
+		case left == nil && len(matches) == 1:
+			return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right)
+
+		// Left matcher with 1 fixed set match.
+		case right == nil && len(matches) == 1:
+			return &literalSuffixStringMatcher{
+				left:                left,
+				suffix:              matches[0],
+				suffixCaseSensitive: matchesCaseSensitive,
+			}
+
+		// We found literals in the middle. We can trigger the fast path only if
+		// the matches are case sensitive because containsStringMatcher doesn't
+		// support case-insensitive matching.
+		case matchesCaseSensitive:
+			return &containsStringMatcher{
+				substrings: matches,
+				left:       left,
+				right:      right,
+			}
+		}
+	}
+	return nil
+}
+
+// containsStringMatcher matches a string if it contains any of the substrings.
+// If left and right are not nil, it's a contains operation where left and right must match.
+// If left is nil, it's a hasPrefix operation and right must match.
+// Finally, if right is nil it's a hasSuffix operation and left must match.
+type containsStringMatcher struct {
+	// The matcher that must match the left side. Can be nil.
+	left StringMatcher
+
+	// At least one of these strings must match in the "middle", between left and right matchers.
+	substrings []string
+
+	// The matcher that must match the right side. Can be nil.
+	right StringMatcher
+}
+
+func (m *containsStringMatcher) Matches(s string) bool {
+	for _, substr := range m.substrings {
+		switch {
+		case m.right != nil && m.left != nil:
+			searchStartPos := 0
+
+			for {
+				pos := strings.Index(s[searchStartPos:], substr)
+				if pos < 0 {
+					break
+				}
+
+				// Since we started searching from searchStartPos, we have to add that offset
+				// to get the actual position of the substring inside the text.
+				pos += searchStartPos
+
+				// If both the left and right matchers match, then we can stop searching because
+				// we've found a match.
+				if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) {
+					return true
+				}
+
+				// Continue searching for another occurrence of the substring inside the text.
+				searchStartPos = pos + 1
+			}
+		case m.left != nil:
+			// If we have to check for characters on the left then we need to match a suffix.
+			if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) {
+				return true
+			}
+		case m.right != nil:
+			if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher {
+	if prefixCaseSensitive {
+		return &literalPrefixSensitiveStringMatcher{
+			prefix: prefix,
+			right:  right,
+		}
+	}
+
+	return &literalPrefixInsensitiveStringMatcher{
+		prefix: prefix,
+		right:  right,
+	}
+}
+
+// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher.
+type literalPrefixSensitiveStringMatcher struct {
+	prefix string
+
+	// The matcher that must match the right side. Can be nil.
+	right StringMatcher
+}
+
+func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool {
+	if !strings.HasPrefix(s, m.prefix) {
+		return false
+	}
+
+	// Ensure the right side matches.
+	return m.right.Matches(s[len(m.prefix):])
+}
+
+// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher.
+type literalPrefixInsensitiveStringMatcher struct {
+	prefix string
+
+	// The matcher that must match the right side. Can be nil.
+	right StringMatcher
+}
+
+func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool {
+	if !hasPrefixCaseInsensitive(s, m.prefix) {
+		return false
+	}
+
+	// Ensure the right side matches.
+	return m.right.Matches(s[len(m.prefix):])
+}
+
+// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher.
+type literalSuffixStringMatcher struct {
+	// The matcher that must match the left side. Can be nil.
+	left StringMatcher
+
+	suffix              string
+	suffixCaseSensitive bool
+}
+
+func (m *literalSuffixStringMatcher) Matches(s string) bool {
+	// Ensure the suffix matches.
+	if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) {
+		return false
+	}
+	if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) {
+		return false
+	}
+
+	// Ensure the left side matches.
+	return m.left.Matches(s[:len(s)-len(m.suffix)])
+}
+
+// emptyStringMatcher matches an empty string.
+type emptyStringMatcher struct{}
+
+func (m emptyStringMatcher) Matches(s string) bool {
+	return len(s) == 0
+}
+
+// orStringMatcher matches any of the sub-matchers.
+type orStringMatcher []StringMatcher
+
+func (m orStringMatcher) Matches(s string) bool {
+	for _, matcher := range m {
+		if matcher.Matches(s) {
+			return true
+		}
+	}
+	return false
+}
+
+// equalStringMatcher matches a string exactly and supports case-insensitive matching.
+type equalStringMatcher struct {
+	s             string
+	caseSensitive bool
+}
+
+func (m *equalStringMatcher) Matches(s string) bool {
+	if m.caseSensitive {
+		return m.s == s
+	}
+	return strings.EqualFold(m.s, s)
+}
+
+type multiStringMatcherBuilder interface {
+	StringMatcher
+	add(s string)
+	addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher)
+	setMatches() []string
+}
+
+func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder {
+	// If the estimated size is low enough, it's faster to use a slice instead of a map.
+	if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 {
+		return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
+	}
+
+	return &equalMultiStringMapMatcher{
+		values:        make(map[string]struct{}, estimatedSize),
+		prefixes:      make(map[string][]StringMatcher, estimatedPrefixes),
+		minPrefixLen:  minPrefixLength,
+		caseSensitive: caseSensitive,
+	}
+}
+
+// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values.
+type equalMultiStringSliceMatcher struct {
+	values []string
+
+	caseSensitive bool
+}
+
+func (m *equalMultiStringSliceMatcher) add(s string) {
+	m.values = append(m.values, s)
+}
+
+func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
+	panic("not implemented")
+}
+
+func (m *equalMultiStringSliceMatcher) setMatches() []string {
+	return m.values
+}
+
+func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
+	if m.caseSensitive {
+		for _, v := range m.values {
+			if s == v {
+				return true
+			}
+		}
+	} else {
+		for _, v := range m.values {
+			if strings.EqualFold(s, v) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// equalMultiStringMapMatcher matches a string exactly against a map of valid values
+// or against a set of prefix matchers.
+type equalMultiStringMapMatcher struct {
+	// values contains values to match a string against. If the matching is case insensitive,
+	// the values here must be lowercase.
+	values map[string]struct{}
+	// prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string.
+	// If the matching is case insensitive, prefixes are all lowercase.
+	prefixes map[string][]StringMatcher
+	// minPrefixLen can be zero, meaning there are no prefix matchers.
+	minPrefixLen  int
+	caseSensitive bool
+}
+
+func (m *equalMultiStringMapMatcher) add(s string) {
+	if !m.caseSensitive {
+		s = toNormalisedLower(s, nil) // Don't pass a stack buffer here - it will always escape to heap.
+	}
+
+	m.values[s] = struct{}{}
+}
+
+func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) {
+	if m.minPrefixLen == 0 {
+		panic("addPrefix called when no prefix length defined")
+	}
+	if len(prefix) < m.minPrefixLen {
+		panic("addPrefix called with a too short prefix")
+	}
+	if m.caseSensitive != prefixCaseSensitive {
+		panic("addPrefix called with a prefix whose case sensitivity is different than the expected one")
+	}
+
+	s := prefix[:m.minPrefixLen]
+	if !m.caseSensitive {
+		s = strings.ToLower(s)
+	}
+
+	m.prefixes[s] = append(m.prefixes[s], matcher)
+}
+
+func (m *equalMultiStringMapMatcher) setMatches() []string {
+	if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 {
+		return nil
+	}
+
+	matches := make([]string, 0, len(m.values))
+	for s := range m.values {
+		matches = append(matches, s)
+	}
+	return matches
+}
+
+func (m *equalMultiStringMapMatcher) Matches(s string) bool {
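+	// The fixed-size buffers below allow toNormalisedLower to lower-case short
+	// strings without a heap allocation in the common case.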
+	if len(m.values) > 0 {
+		sNorm := s
+		var a [32]byte
+		if !m.caseSensitive {
+			sNorm = toNormalisedLower(s, a[:])
+		}
+		if _, ok := m.values[sNorm]; ok {
+			return true
+		}
+	}
+
+	if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
+		prefix := s[:m.minPrefixLen]
+		var a [32]byte
+		if !m.caseSensitive {
+			prefix = toNormalisedLower(s[:m.minPrefixLen], a[:])
+		}
+		for _, matcher := range m.prefixes[prefix] {
+			if matcher.Matches(s) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// toNormalisedLower normalises the input string using Unicode Normalization
+// Form KD (norm.NFKD) and then converts it to lower case.
+func toNormalisedLower(s string, a []byte) string {
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= utf8.RuneSelf {
+			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
+		}
+		if 'A' <= c && c <= 'Z' {
+			return toNormalisedLowerSlow(s, i, a)
+		}
+	}
+	return s
+}
+
+// toNormalisedLowerSlow is split out of toNormalisedLower because including the
+// call to `copy` there slows down the fast path even when the copy never runs.
+func toNormalisedLowerSlow(s string, i int, a []byte) string {
+	var buf []byte
+	if cap(a) > len(s) {
+		buf = a[:len(s)]
+		copy(buf, s)
+	} else {
+		buf = []byte(s)
+	}
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c >= utf8.RuneSelf {
+			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
+		}
+		if 'A' <= c && c <= 'Z' {
+			buf[i] = c + 'a' - 'A'
+		}
+	}
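+	// yoloString (defined elsewhere in this package) converts buf to a string
+	// without copying, so buf must not be mutated afterwards.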
+	return yoloString(buf)
+}
+
+// anyStringWithoutNewlineMatcher is a StringMatcher which matches any string
+// (including an empty one) as long as it doesn't contain any newline character.
+type anyStringWithoutNewlineMatcher struct{}
+
+func (m anyStringWithoutNewlineMatcher) Matches(s string) bool {
+	// We need to make sure it doesn't contain a newline. Since the newline is
+	// an ASCII character, we can use strings.IndexByte().
+	return strings.IndexByte(s, '\n') == -1
+}
+
+// anyNonEmptyStringMatcher is a StringMatcher which matches any non-empty string.
+type anyNonEmptyStringMatcher struct {
+	matchNL bool
+}
+
+func (m *anyNonEmptyStringMatcher) Matches(s string) bool {
+	if m.matchNL {
+		// It's OK if the string contains a newline so we just need to make
+		// sure it's non-empty.
+		return len(s) > 0
+	}
+
+	// We need to make sure it's non-empty and doesn't contain a newline.
+	// Since the newline is an ASCII character, we can use strings.IndexByte().
+	return len(s) > 0 && strings.IndexByte(s, '\n') == -1
+}
+
+// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence
+// of any character. The newline character is matched only if matchNL is set to true.
+type zeroOrOneCharacterStringMatcher struct {
+	matchNL bool
+}
+
+func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
+	// If there's more than one rune in the string, then it can't match.
+	if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError {
+		// Size is 0 for empty strings, 1 for invalid rune.
+		// Empty string matches, invalid rune matches if there isn't anything else.
+		return size == len(s)
+	} else if size < len(s) {
+		return false
+	}
+
+	// No need to check for the newline if the string is empty or matching a newline is OK.
+	if m.matchNL || len(s) == 0 {
+		return true
+	}
+
+	return s[0] != '\n'
+}
+
+// trueMatcher is a StringMatcher which matches any string (always returns true).
+type trueMatcher struct{}
+
+func (m trueMatcher) Matches(_ string) bool {
+	return true
+}
+
+// optimizeEqualOrPrefixStringMatchers optimizes a specific case where all matchers form an
+// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or
+// matched against a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher).
+//
+// In this specific case, when we have many strings to match against we can use a map instead
+// of iterating over the list of strings.
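+//
+// For example, a regexp like "foo|bar|baz" parses into an orStringMatcher of
+// equalStringMatchers; once the number of alternatives reaches the threshold,
+// this function collapses them into a single map lookup per Matches call.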
+func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher {
+	var (
+		caseSensitive    bool
+		caseSensitiveSet bool
+		numValues        int
+		numPrefixes      int
+		minPrefixLength  int
+	)
+
+	// Analyse the input StringMatcher to count the number of occurrences
+	// and ensure all of them have the same case sensitivity.
+	analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool {
+		// Ensure we don't have mixed case sensitivity.
+		if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
+			return false
+		} else if !caseSensitiveSet {
+			caseSensitive = matcher.caseSensitive
+			caseSensitiveSet = true
+		}
+
+		numValues++
+		return true
+	}
+
+	analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool {
+		// Ensure we don't have mixed case sensitivity.
+		if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
+			return false
+		} else if !caseSensitiveSet {
+			caseSensitive = prefixCaseSensitive
+			caseSensitiveSet = true
+		}
+		if numPrefixes == 0 || len(prefix) < minPrefixLength {
+			minPrefixLength = len(prefix)
+		}
+
+		numPrefixes++
+		return true
+	}
+
+	if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) {
+		return input
+	}
+
+	// If the number of values and prefixes found is less than the threshold, then we should skip the optimization.
+	if (numValues + numPrefixes) < threshold {
+		return input
+	}
+
+	// Parse the input StringMatcher again to extract and store all values.
+	// We can skip the case sensitivity check: if the code reaches this point,
+	// all matchers are guaranteed to share the same case sensitivity.
+	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
+
+	// Ignore the return value: the earlier analysis already verified that the
+	// input only contains supported matchers.
+	findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
+		multiMatcher.add(matcher.s)
+		return true
+	}, func(prefix string, _ bool, matcher StringMatcher) bool {
+		multiMatcher.addPrefix(prefix, caseSensitive, matcher)
+		return true
+	})
+
+	return multiMatcher
+}
+
+// findEqualOrPrefixStringMatchers analyses the input StringMatcher and calls equalMatcherCallback for each
+// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
+//
+// Returns true if and only if the input StringMatcher is *only* composed of an alternation of equalStringMatcher and/or
+// literal prefix matchers. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
+func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
+	orInput, ok := input.(orStringMatcher)
+	if !ok {
+		return false
+	}
+
+	for _, m := range orInput {
+		switch casted := m.(type) {
+		case orStringMatcher:
+			if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
+				return false
+			}
+
+		case *equalStringMatcher:
+			if !equalMatcherCallback(casted) {
+				return false
+			}
+
+		case *literalPrefixSensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
+				return false
+			}
+
+		case *literalPrefixInsensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
+				return false
+			}
+
+		default:
+			// It's not an equal or prefix string matcher, so we have to stop searching
+			// because this optimization can't be applied.
+			return false
+		}
+	}
+
+	return true
+}
+
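+// hasPrefixCaseInsensitive is a case-insensitive strings.HasPrefix, using
+// strings.EqualFold to compare the candidate prefix.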
+func hasPrefixCaseInsensitive(s, prefix string) bool {
+	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
+
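+// hasSuffixCaseInsensitive is a case-insensitive strings.HasSuffix, using
+// strings.EqualFold to compare the candidate suffix.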
+func hasSuffixCaseInsensitive(s, suffix string) bool {
+	return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
+}
+
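+// containsInOrder reports whether all strings in contains appear in s, in the
+// given order and without overlapping.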
+func containsInOrder(s string, contains []string) bool {
+	// Optimization for the case we only have to look for 1 substring.
+	if len(contains) == 1 {
+		return strings.Contains(s, contains[0])
+	}
+
+	return containsInOrderMulti(s, contains)
+}
+
+func containsInOrderMulti(s string, contains []string) bool {
+	offset := 0
+
+	for _, substr := range contains {
+		at := strings.Index(s[offset:], substr)
+		if at == -1 {
+			return false
+		}
+
+		offset += at + len(substr)
+	}
+
+	return true
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b3a369397d12cb660acd150b726519a75da1720
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding.go
@@ -0,0 +1,47 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !stringlabels && !dedupelabels
+
+package labels
+
+import (
+	"github.com/cespare/xxhash/v2"
+)
+
+// StableHash is a labels hashing implementation which is guaranteed to not change over time.
+// This function should be used whenever labels hashing backward compatibility must be guaranteed.
+func StableHash(ls Labels) uint64 {
+	// Use xxhash.Sum64(b) for the fast path as it's faster than the Write API.
+	b := make([]byte, 0, 1024)
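+	// sep and seps (defined elsewhere in this package) hold the separator byte
+	// written between names and values so the boundaries stay unambiguous.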
+	for i, v := range ls {
+		if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
+			// If labels entry is 1KB+, hash the rest of them via Write().
+			h := xxhash.New()
+			_, _ = h.Write(b)
+			for _, v := range ls[i:] {
+				_, _ = h.WriteString(v.Name)
+				_, _ = h.Write(seps)
+				_, _ = h.WriteString(v.Value)
+				_, _ = h.Write(seps)
+			}
+			return h.Sum64()
+		}
+
+		b = append(b, v.Name...)
+		b = append(b, sep)
+		b = append(b, v.Value...)
+		b = append(b, sep)
+	}
+	return xxhash.Sum64(b)
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go
new file mode 100644
index 0000000000000000000000000000000000000000..5bf41b05d6acd28823f8984d2251326af7a58803
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go
@@ -0,0 +1,52 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build dedupelabels
+
+package labels
+
+import (
+	"github.com/cespare/xxhash/v2"
+)
+
+// StableHash is a labels hashing implementation which is guaranteed to not change over time.
+// This function should be used whenever labels hashing backward compatibility must be guaranteed.
+func StableHash(ls Labels) uint64 {
+	// Use xxhash.Sum64(b) for the fast path as it's faster than the Write API.
+	b := make([]byte, 0, 1024)
+	for pos := 0; pos < len(ls.data); {
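+		// decodeString resolves the next symbol reference in ls.data against the
+		// symbol table ls.syms, returning the string and the following offset.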
+		name, newPos := decodeString(ls.syms, ls.data, pos)
+		value, newPos := decodeString(ls.syms, ls.data, newPos)
+		if len(b)+len(name)+len(value)+2 >= cap(b) {
+			// If labels entry is 1KB+, hash the rest of them via Write().
+			h := xxhash.New()
+			_, _ = h.Write(b)
+			for pos < len(ls.data) {
+				name, pos = decodeString(ls.syms, ls.data, pos)
+				value, pos = decodeString(ls.syms, ls.data, pos)
+				_, _ = h.WriteString(name)
+				_, _ = h.Write(seps)
+				_, _ = h.WriteString(value)
+				_, _ = h.Write(seps)
+			}
+			return h.Sum64()
+		}
+
+		b = append(b, name...)
+		b = append(b, sep)
+		b = append(b, value...)
+		b = append(b, sep)
+		pos = newPos
+	}
+	return xxhash.Sum64(b)
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go
new file mode 100644
index 0000000000000000000000000000000000000000..798f268eb97881dcc9eb227d50e144077c150a63
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go
@@ -0,0 +1,54 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build stringlabels
+
+package labels
+
+import (
+	"github.com/cespare/xxhash/v2"
+)
+
+// StableHash is a labels hashing implementation which is guaranteed to not change over time.
+// This function should be used whenever labels hashing backward compatibility must be guaranteed.
+func StableHash(ls Labels) uint64 {
+	// Use xxhash.Sum64(b) for the fast path as it's faster than the Write API.
+	b := make([]byte, 0, 1024)
+	var h *xxhash.Digest
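+	// h stays nil while the 1KB buffer suffices; it is created lazily only if
+	// the encoded labels outgrow it.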
+	for i := 0; i < len(ls.data); {
+		var v Label
+		v.Name, i = decodeString(ls.data, i)
+		v.Value, i = decodeString(ls.data, i)
+		if h == nil && len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
+			// If labels entry is 1KB+, switch to Write API. Copy in the values up to this point.
+			h = xxhash.New()
+			_, _ = h.Write(b)
+		}
+		if h != nil {
+			_, _ = h.WriteString(v.Name)
+			_, _ = h.Write(seps)
+			_, _ = h.WriteString(v.Value)
+			_, _ = h.Write(seps)
+			continue
+		}
+
+		b = append(b, v.Name...)
+		b = append(b, sep)
+		b = append(b, v.Value...)
+		b = append(b, sep)
+	}
+	if h != nil {
+		return h.Sum64()
+	}
+	return xxhash.Sum64(b)
+}
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
similarity index 90%
rename from vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go
rename to vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
index 319ee6184ec0c400f52d1f12d6d24b72216eade1..d060def4811540d55131fe8e58f277580efb0b2b 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
@@ -15,11 +15,9 @@ package labels
 
 import (
 	"bufio"
+	"fmt"
 	"os"
-	"sort"
 	"strings"
-
-	"github.com/pkg/errors"
 )
 
 // Slice is a sortable slice of label sets.
@@ -52,13 +50,14 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
 	defer f.Close()
 
 	scanner := bufio.NewScanner(f)
+	b := NewScratchBuilder(0)
 
 	var mets []Labels
 	hashes := map[uint64]struct{}{}
 	i := 0
 
 	for scanner.Scan() && i < n {
-		m := make(Labels, 0, 10)
+		b.Reset()
 
 		r := strings.NewReplacer("\"", "", "{", "", "}", "")
 		s := r.Replace(scanner.Text())
@@ -66,10 +65,11 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
 		labelChunks := strings.Split(s, ",")
 		for _, labelChunk := range labelChunks {
 			split := strings.Split(labelChunk, ":")
-			m = append(m, Label{Name: split[0], Value: split[1]})
+			b.Add(split[0], split[1])
 		}
 		// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
-		sort.Sort(m)
+		b.Sort()
+		m := b.Labels()
 
 		h := m.Hash()
 		if _, ok := hashes[h]; ok {
@@ -81,7 +81,7 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
 	}
 
 	if i != n {
-		return mets, errors.Errorf("requested %d metrics but found %d", n, i)
+		return mets, fmt.Errorf("requested %d metrics but found %d", n, i)
 	}
 	return mets, nil
 }
diff --git a/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go b/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b7e63e0f3501f4a213f30afd0a9d6dc9b7a5354
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import "github.com/prometheus/common/model"
+
+// Metadata stores a series' metadata information.
+type Metadata struct {
+	Type model.MetricType `json:"type"`
+	Unit string           `json:"unit"`
+	Help string           `json:"help"`
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go b/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go
new file mode 100644
index 0000000000000000000000000000000000000000..93458f644d1b82d1c941b27017f08e5422241bf4
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package timestamp
+
+import (
+	"math"
+	"time"
+)
+
+// FromTime returns a new millisecond timestamp from a time.
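+// Sub-millisecond precision is truncated, so Time(FromTime(t)) equals t only
+// to the millisecond.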
+func FromTime(t time.Time) int64 {
+	return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond)
+}
+
+// Time returns a new time.Time object from a millisecond timestamp.
+func Time(ts int64) time.Time {
+	return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC()
+}
+
+// FromFloatSeconds returns a millisecond timestamp from float seconds.
+func FromFloatSeconds(ts float64) int64 {
+	return int64(math.Round(ts * 1000))
+}
diff --git a/vendor/github.com/prometheus/prometheus/pkg/value/value.go b/vendor/github.com/prometheus/prometheus/model/value/value.go
similarity index 100%
rename from vendor/github.com/prometheus/prometheus/pkg/value/value.go
rename to vendor/github.com/prometheus/prometheus/model/value/value.go
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go b/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go
deleted file mode 100644
index eb2b0799587c6f94f6073cddb1476b3daa139c45..0000000000000000000000000000000000000000
--- a/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"regexp"
-	"regexp/syntax"
-	"strings"
-)
-
-type FastRegexMatcher struct {
-	re       *regexp.Regexp
-	prefix   string
-	suffix   string
-	contains string
-}
-
-func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
-	re, err := regexp.Compile("^(?:" + v + ")$")
-	if err != nil {
-		return nil, err
-	}
-
-	parsed, err := syntax.Parse(v, syntax.Perl)
-	if err != nil {
-		return nil, err
-	}
-
-	m := &FastRegexMatcher{
-		re: re,
-	}
-
-	if parsed.Op == syntax.OpConcat {
-		m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
-	}
-
-	return m, nil
-}
-
-func (m *FastRegexMatcher) MatchString(s string) bool {
-	if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
-		return false
-	}
-	if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
-		return false
-	}
-	if m.contains != "" && !strings.Contains(s, m.contains) {
-		return false
-	}
-	return m.re.MatchString(s)
-}
-
-func (m *FastRegexMatcher) GetRegexString() string {
-	return m.re.String()
-}
-
-// optimizeConcatRegex returns literal prefix/suffix text that can be safely
-// checked against the label value before running the regexp matcher.
-func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
-	sub := r.Sub
-
-	// We can safely remove begin and end text matchers respectively
-	// at the beginning and end of the regexp.
-	if len(sub) > 0 && sub[0].Op == syntax.OpBeginText {
-		sub = sub[1:]
-	}
-	if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText {
-		sub = sub[:len(sub)-1]
-	}
-
-	if len(sub) == 0 {
-		return
-	}
-
-	// Given Prometheus regex matchers are always anchored to the begin/end
-	// of the text, if the first/last operations are literals, we can safely
-	// treat them as prefix/suffix.
-	if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 {
-		prefix = string(sub[0].Rune)
-	}
-	if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 {
-		suffix = string(sub[last].Rune)
-	}
-
-	// If contains any literal which is not a prefix/suffix, we keep the
-	// 1st one. We do not keep the whole list of literals to simplify the
-	// fast path.
-	for i := 1; i < len(sub)-1; i++ {
-		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
-			contains = string(sub[i].Rune)
-			break
-		}
-	}
-
-	return
-}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
index de82d67254653d70c3aa9a24f00853c2cc2e75ef..132ef3f0d28b8e81967bf44c13f0bbce90fd00fd 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -15,12 +15,13 @@ package parser
 
 import (
 	"context"
+	"fmt"
 	"time"
 
-	"github.com/pkg/errors"
-
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+
+	"github.com/prometheus/prometheus/promql/parser/posrange"
 )
 
 // Node is a generic interface for all nodes in an AST.
@@ -29,20 +30,24 @@ import (
 // or a chain of function definitions (e.g. String(), PromQLExpr(), etc.) convention is
 // to list them as follows:
 //
-// 	- Statements
-// 	- statement types (alphabetical)
-// 	- ...
-// 	- Expressions
-// 	- expression types (alphabetical)
-// 	- ...
-//
+//   - Statements
+//   - statement types (alphabetical)
+//   - ...
+//   - Expressions
+//   - expression types (alphabetical)
+//   - ...
 type Node interface {
 	// String representation of the node that returns the given node when parsed
 	// as part of a valid query.
 	String() string
 
+	// Pretty returns the prettified representation of the node.
+	// It uses the level information to determine at which level/depth the current
+	// node is in the AST and uses this to apply indentation.
+	Pretty(level int) string
+
 	// PositionRange returns the position of the AST Node in the query string.
-	PositionRange() PositionRange
+	PositionRange() posrange.PositionRange
 }
 
 // Statement is a generic interface for all statements.
@@ -50,7 +55,6 @@ type Statement interface {
 	Node
 
 	// PromQLStmt ensures that no other type accidentally implements the interface
-	// nolint:unused
 	PromQLStmt()
 }
 
@@ -64,6 +68,8 @@ type EvalStmt struct {
 	Start, End time.Time
 	// Time between two evaluated instants for the range [Start:End].
 	Interval time.Duration
+	// Lookback delta to use for this evaluation.
+	LookbackDelta time.Duration
 }
 
 func (*EvalStmt) PromQLStmt() {}
@@ -89,7 +95,7 @@ type AggregateExpr struct {
 	Param    Expr     // Parameter used by some aggregators.
 	Grouping []string // The labels by which to group the Vector.
 	Without  bool     // Whether to drop the given labels rather than keep them.
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // BinaryExpr represents a binary expression between two child expressions.
@@ -110,7 +116,7 @@ type Call struct {
 	Func *Function   // The function that was called.
 	Args Expressions // Arguments used in the call.
 
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // MatrixSelector represents a Matrix selection.
@@ -120,37 +126,45 @@ type MatrixSelector struct {
 	VectorSelector Expr
 	Range          time.Duration
 
-	EndPos Pos
+	EndPos posrange.Pos
 }
 
 // SubqueryExpr represents a subquery.
 type SubqueryExpr struct {
-	Expr   Expr
-	Range  time.Duration
-	Offset time.Duration
-	Step   time.Duration
-
-	EndPos Pos
+	Expr  Expr
+	Range time.Duration
+	// OriginalOffset is the actual offset that was set in the query.
+	// This never changes.
+	OriginalOffset time.Duration
+	// Offset is the offset used during the query execution
+	// which is calculated using the original offset, @ modifier time,
+	// eval time, and subquery offsets in the AST tree.
+	Offset     time.Duration
+	Timestamp  *int64
+	StartOrEnd ItemType // Set when @ is used with start() or end()
+	Step       time.Duration
+
+	EndPos posrange.Pos
 }
 
 // NumberLiteral represents a number.
 type NumberLiteral struct {
 	Val float64
 
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // ParenExpr wraps an expression so it cannot be disassembled as a consequence
 // of operator precedence.
 type ParenExpr struct {
 	Expr     Expr
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // StringLiteral represents a string.
 type StringLiteral struct {
 	Val      string
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // UnaryExpr represents a unary operation on another expression.
@@ -159,31 +173,58 @@ type UnaryExpr struct {
 	Op   ItemType
 	Expr Expr
 
-	StartPos Pos
+	StartPos posrange.Pos
+}
+
+// StepInvariantExpr represents a query which evaluates to the same result
+// irrespective of the evaluation time, provided the raw samples from the TSDB remain unchanged.
+// Currently this is only used for engine optimisations; the parser does not produce it.
+type StepInvariantExpr struct {
+	Expr Expr
+}
+
+func (e *StepInvariantExpr) String() string { return e.Expr.String() }
+
+func (e *StepInvariantExpr) PositionRange() posrange.PositionRange {
+	return e.Expr.PositionRange()
 }
 
 // VectorSelector represents a Vector selection.
 type VectorSelector struct {
-	Name          string
-	Offset        time.Duration
-	LabelMatchers []*labels.Matcher
+	Name string
+	// OriginalOffset is the actual offset that was set in the query.
+	// This never changes.
+	OriginalOffset time.Duration
+	// Offset is the offset used during the query execution
+	// which is calculated using the original offset, @ modifier time,
+	// eval time, and subquery offsets in the AST tree.
+	Offset               time.Duration
+	Timestamp            *int64
+	SkipHistogramBuckets bool     // Set when decoding native histogram buckets is not needed for query evaluation.
+	StartOrEnd           ItemType // Set when @ is used with start() or end()
+	LabelMatchers        []*labels.Matcher
 
 	// The unexpanded seriesSet populated at query preparation time.
 	UnexpandedSeriesSet storage.SeriesSet
 	Series              []storage.Series
 
-	PosRange PositionRange
+	// BypassEmptyMatcherCheck is true when the VectorSelector isn't required to have at least one matcher that doesn't match the empty string.
+	// This is the case when VectorSelector is used to represent the info function's second argument.
+	BypassEmptyMatcherCheck bool
+
+	PosRange posrange.PositionRange
 }
 
 // TestStmt is an internal helper statement that allows execution
 // of an arbitrary function during handling. It is used to test the Engine.
 type TestStmt func(context.Context) error
 
-func (TestStmt) String() string { return "test statement" }
-func (TestStmt) PromQLStmt()    {}
+func (TestStmt) String() string      { return "test statement" }
+func (TestStmt) PromQLStmt()         {}
+func (t TestStmt) Pretty(int) string { return t.String() }
 
-func (TestStmt) PositionRange() PositionRange {
-	return PositionRange{
+func (TestStmt) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: -1,
 		End:   -1,
 	}
@@ -203,17 +244,19 @@ func (e *BinaryExpr) Type() ValueType {
 	}
 	return ValueTypeVector
 }
+func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() }
 
-func (*AggregateExpr) PromQLExpr()  {}
-func (*BinaryExpr) PromQLExpr()     {}
-func (*Call) PromQLExpr()           {}
-func (*MatrixSelector) PromQLExpr() {}
-func (*SubqueryExpr) PromQLExpr()   {}
-func (*NumberLiteral) PromQLExpr()  {}
-func (*ParenExpr) PromQLExpr()      {}
-func (*StringLiteral) PromQLExpr()  {}
-func (*UnaryExpr) PromQLExpr()      {}
-func (*VectorSelector) PromQLExpr() {}
+func (*AggregateExpr) PromQLExpr()     {}
+func (*BinaryExpr) PromQLExpr()        {}
+func (*Call) PromQLExpr()              {}
+func (*MatrixSelector) PromQLExpr()    {}
+func (*SubqueryExpr) PromQLExpr()      {}
+func (*NumberLiteral) PromQLExpr()     {}
+func (*ParenExpr) PromQLExpr()         {}
+func (*StringLiteral) PromQLExpr()     {}
+func (*UnaryExpr) PromQLExpr()         {}
+func (*VectorSelector) PromQLExpr()    {}
+func (*StepInvariantExpr) PromQLExpr() {}
 
 // VectorMatchCardinality describes the cardinality relationship
 // of two Vectors in a binary operation.
@@ -287,6 +330,18 @@ func Walk(v Visitor, node Node, path []Node) error {
 	return err
 }
 
+func ExtractSelectors(expr Expr) [][]*labels.Matcher {
+	var selectors [][]*labels.Matcher
+	Inspect(expr, func(node Node, _ []Node) error {
+		vs, ok := node.(*VectorSelector)
+		if ok {
+			selectors = append(selectors, vs.LabelMatchers)
+		}
+		return nil
+	})
+	return selectors
+}
+
 type inspector func(Node, []Node) error
 
 func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
@@ -301,8 +356,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
 // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
 // for all the non-nil children of node, recursively.
 func Inspect(node Node, f inspector) {
-	//nolint: errcheck
-	Walk(inspector(f), node, nil)
+	Walk(f, node, nil) //nolint:errcheck
 }
 
 // Children returns a list of all child nodes of a syntax tree node.
@@ -321,13 +375,14 @@ func Children(node Node) []Node {
 	case *AggregateExpr:
 		// While this does not look nice, it should avoid unnecessary allocations
 		// caused by slice resizing
-		if n.Expr == nil && n.Param == nil {
+		switch {
+		case n.Expr == nil && n.Param == nil:
 			return nil
-		} else if n.Expr == nil {
+		case n.Expr == nil:
 			return []Node{n.Param}
-		} else if n.Param == nil {
+		case n.Param == nil:
 			return []Node{n.Expr}
-		} else {
+		default:
 			return []Node{n.Expr, n.Param}
 		}
 	case *BinaryExpr:
@@ -347,88 +402,95 @@ func Children(node Node) []Node {
 		return []Node{n.Expr}
 	case *MatrixSelector:
 		return []Node{n.VectorSelector}
+	case *StepInvariantExpr:
+		return []Node{n.Expr}
 	case *NumberLiteral, *StringLiteral, *VectorSelector:
 		// nothing to do
 		return []Node{}
 	default:
-		panic(errors.Errorf("promql.Children: unhandled node type %T", node))
+		panic(fmt.Errorf("promql.Children: unhandled node type %T", node))
 	}
 }
 
-// PositionRange describes a position in the input string of the parser.
-type PositionRange struct {
-	Start Pos
-	End   Pos
-}
-
 // mergeRanges is a helper function to merge the PositionRanges of two Nodes.
 // Note that the arguments must be in the same order as they
 // occur in the input string.
-func mergeRanges(first Node, last Node) PositionRange {
-	return PositionRange{
+func mergeRanges(first, last Node) posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: first.PositionRange().Start,
 		End:   last.PositionRange().End,
 	}
 }
 
-// Item implements the Node interface.
+// PositionRange implements the Node interface.
 // This makes it possible to call mergeRanges on them.
-func (i *Item) PositionRange() PositionRange {
-	return PositionRange{
+func (i *Item) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: i.Pos,
-		End:   i.Pos + Pos(len(i.Val)),
+		End:   i.Pos + posrange.Pos(len(i.Val)),
 	}
 }
 
-func (e *AggregateExpr) PositionRange() PositionRange {
+func (e *AggregateExpr) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
-func (e *BinaryExpr) PositionRange() PositionRange {
+
+func (e *BinaryExpr) PositionRange() posrange.PositionRange {
 	return mergeRanges(e.LHS, e.RHS)
 }
-func (e *Call) PositionRange() PositionRange {
+
+func (e *Call) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
-func (e *EvalStmt) PositionRange() PositionRange {
+
+func (e *EvalStmt) PositionRange() posrange.PositionRange {
 	return e.Expr.PositionRange()
 }
-func (e Expressions) PositionRange() PositionRange {
+
+func (e Expressions) PositionRange() posrange.PositionRange {
 	if len(e) == 0 {
 		// Position undefined.
-		return PositionRange{
+		return posrange.PositionRange{
 			Start: -1,
 			End:   -1,
 		}
 	}
 	return mergeRanges(e[0], e[len(e)-1])
 }
-func (e *MatrixSelector) PositionRange() PositionRange {
-	return PositionRange{
+
+func (e *MatrixSelector) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.VectorSelector.PositionRange().Start,
 		End:   e.EndPos,
 	}
 }
-func (e *SubqueryExpr) PositionRange() PositionRange {
-	return PositionRange{
+
+func (e *SubqueryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.Expr.PositionRange().Start,
 		End:   e.EndPos,
 	}
 }
-func (e *NumberLiteral) PositionRange() PositionRange {
+
+func (e *NumberLiteral) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
-func (e *ParenExpr) PositionRange() PositionRange {
+
+func (e *ParenExpr) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
-func (e *StringLiteral) PositionRange() PositionRange {
+
+func (e *StringLiteral) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
-func (e *UnaryExpr) PositionRange() PositionRange {
-	return PositionRange{
+
+func (e *UnaryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.StartPos,
 		End:   e.Expr.PositionRange().End,
 	}
 }
-func (e *VectorSelector) PositionRange() PositionRange {
+
+func (e *VectorSelector) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
index 4516829e551e2d7fa65c42337c7e746997cbab17..aa65aca2755cd47baa527a77c4848befd24ddb50 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -16,12 +16,16 @@ package parser
 // Function represents a function of the expression language and is
 // used by function nodes.
 type Function struct {
-	Name       string
-	ArgTypes   []ValueType
-	Variadic   int
-	ReturnType ValueType
+	Name         string
+	ArgTypes     []ValueType
+	Variadic     int
+	ReturnType   ValueType
+	Experimental bool
 }
 
+// EnableExperimentalFunctions controls whether experimentalFunctions are enabled.
+var EnableExperimentalFunctions bool
+
 // Functions is a list of all functions supported by PromQL, including their types.
 var Functions = map[string]*Function{
 	"abs": {
@@ -39,6 +43,36 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"acos": {
+		Name:       "acos",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"acosh": {
+		Name:       "acosh",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"asin": {
+		Name:       "asin",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"asinh": {
+		Name:       "asinh",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"atan": {
+		Name:       "atan",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"atanh": {
+		Name:       "atanh",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"avg_over_time": {
 		Name:       "avg_over_time",
 		ArgTypes:   []ValueType{ValueTypeMatrix},
@@ -54,6 +88,11 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"clamp": {
+		Name:       "clamp",
+		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar, ValueTypeScalar},
+		ReturnType: ValueTypeVector,
+	},
 	"clamp_max": {
 		Name:       "clamp_max",
 		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar},
@@ -64,6 +103,16 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar},
 		ReturnType: ValueTypeVector,
 	},
+	"cos": {
+		Name:       "cos",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"cosh": {
+		Name:       "cosh",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"count_over_time": {
 		Name:       "count_over_time",
 		ArgTypes:   []ValueType{ValueTypeMatrix},
@@ -87,6 +136,17 @@ var Functions = map[string]*Function{
 		Variadic:   1,
 		ReturnType: ValueTypeVector,
 	},
+	"day_of_year": {
+		Name:       "day_of_year",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		Variadic:   1,
+		ReturnType: ValueTypeVector,
+	},
+	"deg": {
+		Name:       "deg",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"delta": {
 		Name:       "delta",
 		ArgTypes:   []ValueType{ValueTypeMatrix},
@@ -107,15 +167,46 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
+	"histogram_avg": {
+		Name:       "histogram_avg",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_count": {
+		Name:       "histogram_count",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_sum": {
+		Name:       "histogram_sum",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_stddev": {
+		Name:       "histogram_stddev",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_stdvar": {
+		Name:       "histogram_stdvar",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_fraction": {
+		Name:       "histogram_fraction",
+		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"histogram_quantile": {
 		Name:       "histogram_quantile",
 		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
-	"holt_winters": {
-		Name:       "holt_winters",
-		ArgTypes:   []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
-		ReturnType: ValueTypeVector,
+	"double_exponential_smoothing": {
+		Name:         "double_exponential_smoothing",
+		ArgTypes:     []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
 	},
 	"hour": {
 		Name:       "hour",
@@ -133,6 +224,13 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"info": {
+		Name:         "info",
+		ArgTypes:     []ValueType{ValueTypeVector, ValueTypeVector},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+		Variadic:     1,
+	},
 	"irate": {
 		Name:       "irate",
 		ArgTypes:   []ValueType{ValueTypeMatrix},
@@ -149,6 +247,11 @@ var Functions = map[string]*Function{
 		Variadic:   -1,
 		ReturnType: ValueTypeVector,
 	},
+	"last_over_time": {
+		Name:       "last_over_time",
+		ArgTypes:   []ValueType{ValueTypeMatrix},
+		ReturnType: ValueTypeVector,
+	},
 	"ln": {
 		Name:       "ln",
 		ArgTypes:   []ValueType{ValueTypeVector},
@@ -164,6 +267,12 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
+	"mad_over_time": {
+		Name:         "mad_over_time",
+		ArgTypes:     []ValueType{ValueTypeMatrix},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+	},
 	"max_over_time": {
 		Name:       "max_over_time",
 		ArgTypes:   []ValueType{ValueTypeMatrix},
@@ -186,16 +295,31 @@ var Functions = map[string]*Function{
 		Variadic:   1,
 		ReturnType: ValueTypeVector,
 	},
+	"pi": {
+		Name:       "pi",
+		ArgTypes:   []ValueType{},
+		ReturnType: ValueTypeScalar,
+	},
 	"predict_linear": {
 		Name:       "predict_linear",
 		ArgTypes:   []ValueType{ValueTypeMatrix, ValueTypeScalar},
 		ReturnType: ValueTypeVector,
 	},
+	"present_over_time": {
+		Name:       "present_over_time",
+		ArgTypes:   []ValueType{ValueTypeMatrix},
+		ReturnType: ValueTypeVector,
+	},
 	"quantile_over_time": {
 		Name:       "quantile_over_time",
 		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"rad": {
+		Name:       "rad",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"rate": {
 		Name:       "rate",
 		ArgTypes:   []ValueType{ValueTypeMatrix},
@@ -217,6 +341,21 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeScalar,
 	},
+	"sgn": {
+		Name:       "sgn",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"sin": {
+		Name:       "sin",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"sinh": {
+		Name:       "sinh",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"sort": {
 		Name:       "sort",
 		ArgTypes:   []ValueType{ValueTypeVector},
@@ -227,6 +366,20 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
+	"sort_by_label": {
+		Name:         "sort_by_label",
+		ArgTypes:     []ValueType{ValueTypeVector, ValueTypeString},
+		Variadic:     -1,
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+	},
+	"sort_by_label_desc": {
+		Name:         "sort_by_label_desc",
+		ArgTypes:     []ValueType{ValueTypeVector, ValueTypeString},
+		Variadic:     -1,
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+	},
 	"sqrt": {
 		Name:       "sqrt",
 		ArgTypes:   []ValueType{ValueTypeVector},
@@ -247,6 +400,16 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"tan": {
+		Name:       "tan",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"tanh": {
+		Name:       "tanh",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"time": {
 		Name:       "time",
 		ArgTypes:   []ValueType{},
@@ -271,7 +434,7 @@ var Functions = map[string]*Function{
 }
 
 // getFunction returns a predefined Function object for the given name.
-func getFunction(name string) (*Function, bool) {
-	function, ok := Functions[name]
+func getFunction(name string, functions map[string]*Function) (*Function, bool) {
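+	// The lookup consults the caller-supplied map instead of the package-level
+	// Functions variable, letting parsers use a custom function set.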
+	function, ok := functions[name]
 	return function, ok
 }
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
index f0bdc320fc9b6a9c512e7c19937353495363573c..cdb4532d3bddde8cd9db1f8890649ade968346a4 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -16,27 +16,35 @@ package parser
 
 import (
         "math"
-        "sort"
         "strconv"
         "time"
 
-        "github.com/prometheus/prometheus/pkg/labels"
-        "github.com/prometheus/prometheus/pkg/value"
+        "github.com/prometheus/prometheus/model/labels"
+        "github.com/prometheus/prometheus/model/value"
+        "github.com/prometheus/prometheus/model/histogram"
+        "github.com/prometheus/prometheus/promql/parser/posrange"
+
+        "github.com/prometheus/common/model"
 )
+
 %}
 
 %union {
-    node      Node
-    item      Item
-    matchers  []*labels.Matcher
-    matcher   *labels.Matcher
-    label     labels.Label
-    labels    labels.Labels
-    strings   []string
-    series    []SequenceValue
-    uint      uint64
-    float     float64
-    duration  time.Duration
+    node        Node
+    item        Item
+    matchers    []*labels.Matcher
+    matcher     *labels.Matcher
+    label       labels.Label
+    labels      labels.Labels
+    lblList     []labels.Label
+    strings     []string
+    series      []SequenceValue
+    histogram   *histogram.FloatHistogram
+    descriptors map[string]interface{}
+    bucket_set  []float64
+    int         int64
+    uint        uint64
+    float       float64
 }
 
 
@@ -53,6 +61,8 @@ IDENTIFIER
 LEFT_BRACE
 LEFT_BRACKET
 LEFT_PAREN
+OPEN_HIST
+CLOSE_HIST
 METRIC_IDENTIFIER
 NUMBER
 RIGHT_BRACE
@@ -63,6 +73,22 @@ SPACE
 STRING
 TIMES
 
+// Histogram Descriptors.
+%token histogramDescStart
+%token <item>
+SUM_DESC
+COUNT_DESC
+SCHEMA_DESC
+OFFSET_DESC
+NEGATIVE_OFFSET_DESC
+BUCKETS_DESC
+NEGATIVE_BUCKETS_DESC
+ZERO_BUCKET_DESC
+ZERO_BUCKET_WIDTH_DESC
+CUSTOM_VALUES_DESC
+COUNTER_RESET_HINT_DESC
+%token histogramDescEnd
+
 // Operators.
 %token	operatorsStart
 %token <item>
@@ -83,6 +109,8 @@ NEQ
 NEQ_REGEX
 POW
 SUB
+AT
+ATAN2
 %token	operatorsEnd
 
 // Aggregators.
@@ -100,6 +128,8 @@ STDDEV
 STDVAR
 SUM
 TOPK
+LIMITK
+LIMIT_RATIO
 %token	aggregatorsEnd
 
 // Keywords.
@@ -115,6 +145,21 @@ ON
 WITHOUT
 %token keywordsEnd
 
+// Preprocessors.
+%token preprocessorStart
+%token <item>
+START
+END
+%token preprocessorEnd
+
+// Counter reset hints.
+%token counterResetHintsStart
+%token <item>
+UNKNOWN_COUNTER_RESET
+COUNTER_RESET
+NOT_COUNTER_RESET
+GAUGE_TYPE
+%token counterResetHintsEnd
 
 // Start symbols for the generated parser.
 %token	startSymbolsStart
@@ -129,17 +174,19 @@ START_METRIC_SELECTOR
 // Type definitions for grammar rules.
 %type <matchers> label_match_list
 %type <matcher> label_matcher
-
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op
-
-%type <labels> label_set label_set_list metric
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
+%type <labels> label_set metric
+%type <lblList> label_set_list
 %type <label> label_set_item
 %type <strings> grouping_label_list grouping_labels maybe_grouping_labels
 %type <series> series_item series_values
+%type <histogram> histogram_series_value
+%type <descriptors> histogram_desc_map histogram_desc_item
+%type <bucket_set> bucket_set bucket_set_list
+%type <int> int
 %type <uint> uint
-%type <float> number series_value signed_number
-%type <node> aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
-%type <duration> duration maybe_duration
+%type <float> number series_value signed_number signed_or_unsigned_number
+%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
 
 %start start
 
@@ -148,7 +195,7 @@ START_METRIC_SELECTOR
 %left LAND LUNLESS
 %left EQLC GTE GTR LSS LTE NEQ
 %left ADD SUB
-%left MUL DIV MOD
+%left MUL DIV MOD ATAN2
 %right POW
 
 // Offset modifiers do not have associativity.
@@ -165,7 +212,7 @@ start           :
                         { yylex.(*parser).generatedParserResult = $2 }
                 | START_SERIES_DESCRIPTION series_description
                 | START_EXPRESSION /* empty */ EOF
-                        { yylex.(*parser).addParseErrf(PositionRange{}, "no expression found in input")}
+                        { yylex.(*parser).addParseErrf(posrange.PositionRange{}, "no expression found in input")}
                 | START_EXPRESSION expr
                         { yylex.(*parser).generatedParserResult = $2 }
                 | START_METRIC_SELECTOR vector_selector
@@ -180,13 +227,14 @@ expr            :
                 | binary_expr
                 | function_call
                 | matrix_selector
-                | number_literal
+                | number_duration_literal
                 | offset_expr
                 | paren_expr
                 | string_literal
                 | subquery_expr
                 | unary_expr
                 | vector_selector
+                | step_invariant_expr
                 ;
 
 /*
@@ -200,8 +248,8 @@ aggregate_expr  : aggregate_op aggregate_modifier function_call_body
                 | aggregate_op function_call_body
                         { $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, $2) }
                 | aggregate_op error
-                        { 
-                        yylex.(*parser).unexpected("aggregation",""); 
+                        {
+                        yylex.(*parser).unexpected("aggregation","");
                         $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, Expressions{})
                         }
                 ;
@@ -228,6 +276,7 @@ aggregate_modifier:
 
 // Operator precedence only works if each of those is listed separately.
 binary_expr     : expr ADD     bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
+                | expr ATAN2   bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
                 | expr DIV     bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
                 | expr EQLC    bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
                 | expr GTE     bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
@@ -245,7 +294,7 @@ binary_expr     : expr ADD     bin_modifier expr { $$ = yylex.(*parser).newBinar
                 ;
 
 // Using left recursion for the modifier rules, helps to keep the parser stack small and
-// reduces allocations
+// reduces allocations.
 bin_modifier    : group_modifiers;
 
 bool_modifier   : /* empty */
@@ -313,10 +362,19 @@ grouping_label_list:
 
 grouping_label  : maybe_label
                         {
-                        if !isLabel($1.Val) {
-                                yylex.(*parser).unexpected("grouping opts", "label")
+                        if !model.LabelName($1.Val).IsValid() {
+                                yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", $1.Val)
+                        }
+                        $$ = $1
+                        }
+                | STRING {
+                        unquoted := yylex.(*parser).unquoteString($1.Val)
+                        if !model.LabelName(unquoted).IsValid() {
+                                yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", unquoted)
                         }
                         $$ = $1
+                        $$.Pos++
+                        $$.Val = unquoted
                         }
                 | error
                         { yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} }
@@ -328,14 +386,17 @@ grouping_label  : maybe_label
 
 function_call   : IDENTIFIER function_call_body
                         {
-                        fn, exist := getFunction($1.Val)
+                        fn, exist := getFunction($1.Val, yylex.(*parser).functions)
                         if !exist{
                                 yylex.(*parser).addParseErrf($1.PositionRange(),"unknown function with name %q", $1.Val)
                         }
+                        if fn != nil && fn.Experimental && !EnableExperimentalFunctions {
+                                yylex.(*parser).addParseErrf($1.PositionRange(),"function %q is not enabled", $1.Val)
+                        }
                         $$ = &Call{
                                 Func: fn,
                                 Args: $2.(Expressions),
-                                PosRange: PositionRange{
+                                PosRange: posrange.PositionRange{
                                         Start: $1.Pos,
                                         End:   yylex.(*parser).lastClosing,
                                 },
@@ -372,60 +433,102 @@ paren_expr      : LEFT_PAREN expr RIGHT_PAREN
  * Offset modifiers.
  */
 
-offset_expr: expr OFFSET duration
+offset_expr: expr OFFSET number_duration_literal
                         {
-                        yylex.(*parser).addOffset($1, $3)
-                        $$ = $1
+                        numLit, _ := $3.(*NumberLiteral)
+                        dur := time.Duration(numLit.Val * 1000) * time.Millisecond
+                        yylex.(*parser).addOffset($1, dur)
+                        $$ = $1
+                        }
+                | expr OFFSET SUB number_duration_literal
+                        {
+                        numLit, _ := $4.(*NumberLiteral)
+                        dur := time.Duration(numLit.Val * 1000) * time.Millisecond
+                        yylex.(*parser).addOffset($1, -dur)
+                        $$ = $1
                         }
                 | expr OFFSET error
-                        { yylex.(*parser).unexpected("offset", "duration"); $$ = $1 }
+                        { yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
                 ;
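Offsets (and the range and subquery rules below) now receive a *NumberLiteral whose Val holds seconds, so each action rebuilds a time.Duration from that float. A standalone sketch of the conversion, with a plain float64 standing in for numLit.Val:

```go
package main

import (
	"fmt"
	"time"
)

// secondsToDuration mirrors the actions above: the value is scaled to
// milliseconds before the conversion so fractional seconds survive.
func secondsToDuration(seconds float64) time.Duration {
	return time.Duration(seconds*1000) * time.Millisecond
}

func main() {
	fmt.Println(secondsToDuration(300)) // "offset 5m" lexes to 300 -> 5m0s
	fmt.Println(-secondsToDuration(90)) // "offset -1m30s" -> -1m30s
}
```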
+/*
+ * @ modifiers.
+ */
+
+step_invariant_expr: expr AT signed_or_unsigned_number
+                        {
+                        yylex.(*parser).setTimestamp($1, $3)
+                        $$ = $1
+                        }
+                | expr AT at_modifier_preprocessors LEFT_PAREN RIGHT_PAREN
+                        {
+                        yylex.(*parser).setAtModifierPreprocessor($1, $3)
+                        $$ = $1
+                        }
+                | expr AT error
+                        { yylex.(*parser).unexpected("@", "timestamp"); $$ = $1 }
+                ;
+
+at_modifier_preprocessors: START | END;
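The @ modifier introduced here accepts either a literal Unix timestamp (in seconds) or the start()/end() preprocessors, which resolve to the query-range boundaries at evaluation time. A few query forms the new rules admit, shown as plain strings:

```go
package main

import "fmt"

func main() {
	// All three parse under step_invariant_expr; the last two reduce
	// through at_modifier_preprocessors.
	for _, q := range []string{
		`http_requests_total @ 1609746000`,
		`http_requests_total @ start()`,
		`http_requests_total @ end()`,
	} {
		fmt.Println(q)
	}
}
```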
 
 /*
  * Subquery and range selectors.
  */
 
-matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
+matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
                         {
                         var errMsg string
                         vs, ok := $1.(*VectorSelector)
                         if !ok{
                                 errMsg = "ranges only allowed for vector selectors"
-                        } else if vs.Offset != 0{
+                        } else if vs.OriginalOffset != 0{
                                 errMsg = "no offset modifiers allowed before range"
+                        } else if vs.Timestamp != nil {
+                                errMsg = "no @ modifiers allowed before range"
                         }
 
                         if errMsg != ""{
                                 errRange := mergeRanges(&$2, &$4)
-                                yylex.(*parser).addParseErrf(errRange, errMsg)
+                                yylex.(*parser).addParseErrf(errRange, "%s", errMsg)
                         }
 
+                        numLit, _ := $3.(*NumberLiteral)
                         $$ = &MatrixSelector{
                                 VectorSelector: $1.(Expr),
-                                Range: $3,
+                                Range: time.Duration(numLit.Val * 1000) * time.Millisecond,
                                 EndPos: yylex.(*parser).lastClosing,
                         }
                         }
                 ;
 
-subquery_expr   : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
+subquery_expr   : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET
                         {
+                        numLitRange, _ := $3.(*NumberLiteral)
+                        numLitStep, _ := $5.(*NumberLiteral)
                         $$ = &SubqueryExpr{
                                 Expr:  $1.(Expr),
-                                Range: $3,
-                                Step:  $5,
-
+                                Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
+                                Step:  time.Duration(numLitStep.Val * 1000) * time.Millisecond,
                                 EndPos: $6.Pos + 1,
                         }
                         }
-                | expr LEFT_BRACKET duration COLON duration error
+                | expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET
+                        {
+                        numLitRange, _ := $3.(*NumberLiteral)
+                        $$ = &SubqueryExpr{
+                                Expr:  $1.(Expr),
+                                Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
+                                Step:  0,
+                                EndPos: $5.Pos + 1,
+                        }
+                        }
+                | expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error
                         { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
-                | expr LEFT_BRACKET duration COLON error
-                        { yylex.(*parser).unexpected("subquery selector", "duration or \"]\""); $$ = $1 }
-                | expr LEFT_BRACKET duration error
+                | expr LEFT_BRACKET number_duration_literal COLON error
+                        { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
+                | expr LEFT_BRACKET number_duration_literal error
                         { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
                 | expr LEFT_BRACKET error
-                        { yylex.(*parser).unexpected("subquery selector", "duration"); $$ = $1 }
+                        { yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }
                 ;
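The new "[&lt;range&gt;:]" alternative stores Step == 0, and the engine later substitutes its default evaluation interval. A sketch of that fallback, where defaultInterval is a hypothetical stand-in for the engine's setting:

```go
package main

import (
	"fmt"
	"time"
)

// stepOrDefault illustrates how a zero Step is interpreted downstream.
func stepOrDefault(step, defaultInterval time.Duration) time.Duration {
	if step == 0 {
		return defaultInterval
	}
	return step
}

func main() {
	fmt.Println(stepOrDefault(0, time.Minute))              // expr[30m:]    -> 1m0s
	fmt.Println(stepOrDefault(15*time.Second, time.Minute)) // expr[30m:15s] -> 15s
}
```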
 
 /*
@@ -433,7 +536,7 @@ subquery_expr   : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
  */
 
 unary_expr      :
-                /* gives the rule the same precedence as MUL. This aligns with mathematical conventions */
+                /* Gives the rule the same precedence as MUL. This aligns with mathematical conventions. */
                 unary_op expr %prec MUL
                         {
                         if nl, ok := $2.(*NumberLiteral); ok {
@@ -516,7 +619,13 @@ label_match_list: label_match_list COMMA label_matcher
                 ;
 
 label_matcher   : IDENTIFIER match_op STRING
-                        { $$ = yylex.(*parser).newLabelMatcher($1, $2, $3);  }
+                        { $$ = yylex.(*parser).newLabelMatcher($1, $2, $3); }
+                | string_identifier match_op STRING
+                        { $$ = yylex.(*parser).newLabelMatcher($1, $2, $3); }
+                | string_identifier
+                        { $$ = yylex.(*parser).newMetricNameMatcher($1); }
+                | string_identifier match_op error
+                        { yylex.(*parser).unexpected("label matching", "string"); $$ = nil}
                 | IDENTIFIER match_op error
                         { yylex.(*parser).unexpected("label matching", "string"); $$ = nil}
                 | IDENTIFIER error
@@ -530,13 +639,13 @@ label_matcher   : IDENTIFIER match_op STRING
  */
 
 metric          : metric_identifier label_set
-                        { $$ = append($2, labels.Label{Name: labels.MetricName, Value: $1.Val}); sort.Sort($$) }
+                        { b := labels.NewBuilder($2); b.Set(labels.MetricName, $1.Val); $$ = b.Labels() }
                 | label_set
                         {$$ = $1}
                 ;
 
 
-metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER |  LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT;
+metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER |  LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO;
 
 label_set       : LEFT_BRACE label_set_list RIGHT_BRACE
                         { $$ = labels.New($2...) }
@@ -559,8 +668,14 @@ label_set_list  : label_set_list COMMA label_set_item
 
 label_set_item  : IDENTIFIER EQL STRING
                         { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } }
+                | string_identifier EQL STRING
+                        { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } }
+                | string_identifier
+                        { $$ = labels.Label{Name: labels.MetricName, Value: $1.Val} }
                 | IDENTIFIER EQL error
                         { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}}
+                | string_identifier EQL error
+                        { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}}
                 | IDENTIFIER error
                         { yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}}
                 | error
@@ -568,7 +683,10 @@ label_set_item  : IDENTIFIER EQL STRING
                 ;
 
 /*
- * Series descriptions (only used by unit tests).
+ * Series descriptions:
+ * A separate language that is used to generate series values for promtool.
+ * It is included in the PromQL parser because it shares common functionality, such as parsing a metric.
+ * The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
  */
 
 series_description: metric series_values
@@ -604,6 +722,7 @@ series_item     : BLANK
                 | series_value TIMES uint
                         {
                         $$ = []SequenceValue{}
+                        // Add an additional value for time 0, which we ignore in tests.
                         for i:=uint64(0); i <= $3; i++{
                                 $$ = append($$, SequenceValue{Value: $1})
                         }
@@ -611,11 +730,42 @@ series_item     : BLANK
                 | series_value signed_number TIMES uint
                         {
                         $$ = []SequenceValue{}
+                        // Add an additional value for time 0, which we ignore in tests.
                         for i:=uint64(0); i <= $4; i++{
                                 $$ = append($$, SequenceValue{Value: $1})
                                 $1 += $2
                         }
                         }
+                // Histogram descriptions (part of unit testing).
+                | histogram_series_value
+                        {
+                        $$ = []SequenceValue{{Histogram:$1}}
+                        }
+                | histogram_series_value TIMES uint
+                        {
+                        $$ = []SequenceValue{}
+                        // Add an additional value for time 0, which we ignore in tests.
+                        for i:=uint64(0); i <= $3; i++{
+                                $$ = append($$, SequenceValue{Histogram:$1})
+                                //$1 += $2
+                        }
+                        }
+                | histogram_series_value ADD histogram_series_value TIMES uint
+                        {
+                        val, err := yylex.(*parser).histogramsIncreaseSeries($1,$3,$5)
+                        if err != nil {
+                          yylex.(*parser).addSemanticError(err)
+                        }
+                        $$ = val
+                        }
+                | histogram_series_value SUB histogram_series_value TIMES uint
+                        {
+                        val, err := yylex.(*parser).histogramsDecreaseSeries($1,$3,$5)
+                        if err != nil {
+                          yylex.(*parser).addSemanticError(err)
+                        }
+                        $$ = val
+                        }
                 ;
 
 series_value    : IDENTIFIER
@@ -629,17 +779,130 @@ series_value    : IDENTIFIER
                 | signed_number
                 ;
 
-
-
+histogram_series_value
+                : OPEN_HIST histogram_desc_map SPACE CLOSE_HIST
+                {
+                  $$ = yylex.(*parser).buildHistogramFromMap(&$2)
+                }
+                | OPEN_HIST histogram_desc_map CLOSE_HIST
+                {
+                  $$ = yylex.(*parser).buildHistogramFromMap(&$2)
+                }
+                | OPEN_HIST SPACE CLOSE_HIST
+                {
+                  m := yylex.(*parser).newMap()
+                  $$ = yylex.(*parser).buildHistogramFromMap(&m)
+                }
+                | OPEN_HIST CLOSE_HIST
+                {
+                  m := yylex.(*parser).newMap()
+                  $$ = yylex.(*parser).buildHistogramFromMap(&m)
+                }
+                ;
+
+histogram_desc_map
+                : histogram_desc_map SPACE histogram_desc_item
+                {
+                  $$ = *(yylex.(*parser).mergeMaps(&$1,&$3))
+                }
+                | histogram_desc_item
+                {
+                  $$ = $1
+                }
+                | histogram_desc_map error {
+                  yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
+                }
+                ;
+
+histogram_desc_item
+                : SCHEMA_DESC COLON int
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["schema"] = $3
+                }
+                | SUM_DESC COLON signed_or_unsigned_number
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["sum"] = $3
+                }
+                | COUNT_DESC COLON signed_or_unsigned_number
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["count"] = $3
+                }
+                | ZERO_BUCKET_DESC COLON signed_or_unsigned_number
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["z_bucket"] = $3
+                }
+                | ZERO_BUCKET_WIDTH_DESC COLON number
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["z_bucket_w"] = $3
+                }
+                | CUSTOM_VALUES_DESC COLON bucket_set
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["custom_values"] = $3
+                }
+                | BUCKETS_DESC COLON bucket_set
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["buckets"] = $3
+                }
+                | OFFSET_DESC COLON int
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["offset"] = $3
+                }
+                | NEGATIVE_BUCKETS_DESC COLON bucket_set
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["n_buckets"] = $3
+                }
+                | NEGATIVE_OFFSET_DESC COLON int
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["n_offset"] = $3
+                }
+                | COUNTER_RESET_HINT_DESC COLON counter_reset_hint
+                {
+                   $$ = yylex.(*parser).newMap()
+                   $$["counter_reset_hint"] = $3
+                }
+                ;
+
+bucket_set      : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET
+                {
+                  $$ = $2
+                }
+                | LEFT_BRACKET bucket_set_list RIGHT_BRACKET
+                {
+                  $$ = $2
+                }
+                ;
+
+bucket_set_list : bucket_set_list SPACE signed_or_unsigned_number
+                {
+                  $$ = append($1, $3)
+                }
+                | signed_or_unsigned_number
+                {
+                  $$ = []float64{$1}
+                }
+                | bucket_set_list error
+                ;
+
+counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET | GAUGE_TYPE;
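Each histogram_desc_item above produces a single-key map and mergeMaps folds them into one descriptor, which buildHistogramFromMap then turns into a *histogram.FloatHistogram. For a hypothetical description like {{schema:0 sum:10 count:4 buckets:[1 2 1]}}, the merged map would look roughly as follows (schema and the offsets are int64, the other scalars float64, bucket sets []float64):

```go
package main

import "fmt"

func main() {
	desc := map[string]interface{}{
		"schema":  int64(0),           // SCHEMA_DESC COLON int
		"sum":     10.0,               // SUM_DESC COLON signed_or_unsigned_number
		"count":   4.0,                // COUNT_DESC COLON signed_or_unsigned_number
		"buckets": []float64{1, 2, 1}, // BUCKETS_DESC COLON bucket_set
	}
	fmt.Println(desc)
}
```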
 
 /*
  * Keyword lists.
  */
 
-aggregate_op    : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;
+aggregate_op    : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;
 
-// inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
-maybe_label     : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK;
+// Inside grouping options, label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
+maybe_label     : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO;
 
 unary_op        : ADD | SUB;
 
@@ -649,21 +912,50 @@ match_op        : EQL | NEQ | EQL_REGEX | NEQ_REGEX ;
  * Literals.
  */
 
-number_literal  : NUMBER
+number_duration_literal  : NUMBER
                         {
-                        $$ = &NumberLiteral{
+                            $$ = &NumberLiteral{
                                 Val:           yylex.(*parser).number($1.Val),
                                 PosRange: $1.PositionRange(),
-                        }
-                        }
+                            }
+                        }
+                        | DURATION
+                        {
+                            var err error
+                            var dur time.Duration
+                            dur, err = parseDuration($1.Val)
+                            if err != nil {
+                                    yylex.(*parser).addParseErr($1.PositionRange(), err)
+                            }
+                            $$ = &NumberLiteral{
+                                    Val:      dur.Seconds(),
+                                    PosRange: $1.PositionRange(),
+                            }
+                        }
+                ;
+
+number          : NUMBER
+                        {
+                        $$ = yylex.(*parser).number($1.Val)
+                        }
+                | DURATION
+                        {
+                        var err error
+                        var dur time.Duration
+                        dur, err = parseDuration($1.Val)
+                        if err != nil {
+                                yylex.(*parser).addParseErr($1.PositionRange(), err)
+                        }
+                        $$ = dur.Seconds()
+                        }
                 ;
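With the dedicated duration rule removed below, a DURATION token is collapsed to its value in seconds wherever a number is expected. parseDuration wraps model.ParseDuration from prometheus/common, so the equivalence can be sketched directly:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// "1h30m" and the bare number 5400 are now interchangeable in the
	// number and number_duration_literal rules.
	d, err := model.ParseDuration("1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(d).Seconds()) // 5400
}
```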
 
-number          : NUMBER { $$ = yylex.(*parser).number($1.Val) } ;
-
 signed_number   : ADD number { $$ = $2 }
                 | SUB number { $$ = -$2 }
                 ;
 
+signed_or_unsigned_number: number | signed_number ;
+
 uint            : NUMBER
                         {
                         var err error
@@ -674,17 +966,10 @@ uint            : NUMBER
                         }
                 ;
 
-duration        : DURATION
-                        {
-                        var err error
-                        $$, err = parseDuration($1.Val)
-                        if err != nil {
-                                yylex.(*parser).addParseErr($1.PositionRange(), err)
-                        }
-                        }
+int             : SUB uint { $$ = -int64($2) }
+                | uint { $$ = int64($1) }
                 ;
 
-
 string_literal  : STRING
                         {
                         $$ = &StringLiteral{
@@ -692,17 +977,22 @@ string_literal  : STRING
                                 PosRange: $1.PositionRange(),
                         }
                         }
-                        ;
+                ;
+
+string_identifier  : STRING
+                        {
+                        $$ = Item{
+                                Typ: METRIC_IDENTIFIER,
+                                Pos: $1.PositionRange().Start,
+                                Val: yylex.(*parser).unquoteString($1.Val),
+                        }
+                        }
+                ;
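string_identifier converts a STRING token into a METRIC_IDENTIFIER item after unquoting, which is what lets quoted names stand in for identifiers in label matchers and label sets (e.g. {"http.requests.total"}). A minimal sketch of the unquoting step, assuming the parser's unquoteString behaves like strconv.Unquote for this case:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	name, err := strconv.Unquote(`"http.requests.total"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // http.requests.total
}
```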
 
 /*
  * Wrappers for optional arguments.
  */
 
-maybe_duration  : /* empty */
-                        {$$ = 0}
-                | duration
-                ;
-
 maybe_grouping_labels: /* empty */ { $$ = nil }
                 | grouping_labels
                 ;
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
index 4117f0e715077828d016943f4098e24b4e52773c..78d5e15245e9731eb269b96b239518746e094a49 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
@@ -1,33 +1,38 @@
-// Code generated by goyacc -o generated_parser.y.go generated_parser.y. DO NOT EDIT.
-
-//line generated_parser.y:15
+// Code generated by goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y. DO NOT EDIT.
 package parser
 
+import __yyfmt__ "fmt"
+
 import (
-	__yyfmt__ "fmt"
 	"math"
-	"sort"
 	"strconv"
 	"time"
 
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/value"
-) //line generated_parser.y:15
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/value"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
+
+	"github.com/prometheus/common/model"
+)
 
-//line generated_parser.y:28
 type yySymType struct {
-	yys      int
-	node     Node
-	item     Item
-	matchers []*labels.Matcher
-	matcher  *labels.Matcher
-	label    labels.Label
-	labels   labels.Labels
-	strings  []string
-	series   []SequenceValue
-	uint     uint64
-	float    float64
-	duration time.Duration
+	yys         int
+	node        Node
+	item        Item
+	matchers    []*labels.Matcher
+	matcher     *labels.Matcher
+	label       labels.Label
+	labels      labels.Labels
+	lblList     []labels.Label
+	strings     []string
+	series      []SequenceValue
+	histogram   *histogram.FloatHistogram
+	descriptors map[string]interface{}
+	bucket_set  []float64
+	int         int64
+	uint        uint64
+	float       float64
 }
 
 const EQL = 57346
@@ -42,64 +47,93 @@ const IDENTIFIER = 57354
 const LEFT_BRACE = 57355
 const LEFT_BRACKET = 57356
 const LEFT_PAREN = 57357
-const METRIC_IDENTIFIER = 57358
-const NUMBER = 57359
-const RIGHT_BRACE = 57360
-const RIGHT_BRACKET = 57361
-const RIGHT_PAREN = 57362
-const SEMICOLON = 57363
-const SPACE = 57364
-const STRING = 57365
-const TIMES = 57366
-const operatorsStart = 57367
-const ADD = 57368
-const DIV = 57369
-const EQLC = 57370
-const EQL_REGEX = 57371
-const GTE = 57372
-const GTR = 57373
-const LAND = 57374
-const LOR = 57375
-const LSS = 57376
-const LTE = 57377
-const LUNLESS = 57378
-const MOD = 57379
-const MUL = 57380
-const NEQ = 57381
-const NEQ_REGEX = 57382
-const POW = 57383
-const SUB = 57384
-const operatorsEnd = 57385
-const aggregatorsStart = 57386
-const AVG = 57387
-const BOTTOMK = 57388
-const COUNT = 57389
-const COUNT_VALUES = 57390
-const GROUP = 57391
-const MAX = 57392
-const MIN = 57393
-const QUANTILE = 57394
-const STDDEV = 57395
-const STDVAR = 57396
-const SUM = 57397
-const TOPK = 57398
-const aggregatorsEnd = 57399
-const keywordsStart = 57400
-const BOOL = 57401
-const BY = 57402
-const GROUP_LEFT = 57403
-const GROUP_RIGHT = 57404
-const IGNORING = 57405
-const OFFSET = 57406
-const ON = 57407
-const WITHOUT = 57408
-const keywordsEnd = 57409
-const startSymbolsStart = 57410
-const START_METRIC = 57411
-const START_SERIES_DESCRIPTION = 57412
-const START_EXPRESSION = 57413
-const START_METRIC_SELECTOR = 57414
-const startSymbolsEnd = 57415
+const OPEN_HIST = 57358
+const CLOSE_HIST = 57359
+const METRIC_IDENTIFIER = 57360
+const NUMBER = 57361
+const RIGHT_BRACE = 57362
+const RIGHT_BRACKET = 57363
+const RIGHT_PAREN = 57364
+const SEMICOLON = 57365
+const SPACE = 57366
+const STRING = 57367
+const TIMES = 57368
+const histogramDescStart = 57369
+const SUM_DESC = 57370
+const COUNT_DESC = 57371
+const SCHEMA_DESC = 57372
+const OFFSET_DESC = 57373
+const NEGATIVE_OFFSET_DESC = 57374
+const BUCKETS_DESC = 57375
+const NEGATIVE_BUCKETS_DESC = 57376
+const ZERO_BUCKET_DESC = 57377
+const ZERO_BUCKET_WIDTH_DESC = 57378
+const CUSTOM_VALUES_DESC = 57379
+const COUNTER_RESET_HINT_DESC = 57380
+const histogramDescEnd = 57381
+const operatorsStart = 57382
+const ADD = 57383
+const DIV = 57384
+const EQLC = 57385
+const EQL_REGEX = 57386
+const GTE = 57387
+const GTR = 57388
+const LAND = 57389
+const LOR = 57390
+const LSS = 57391
+const LTE = 57392
+const LUNLESS = 57393
+const MOD = 57394
+const MUL = 57395
+const NEQ = 57396
+const NEQ_REGEX = 57397
+const POW = 57398
+const SUB = 57399
+const AT = 57400
+const ATAN2 = 57401
+const operatorsEnd = 57402
+const aggregatorsStart = 57403
+const AVG = 57404
+const BOTTOMK = 57405
+const COUNT = 57406
+const COUNT_VALUES = 57407
+const GROUP = 57408
+const MAX = 57409
+const MIN = 57410
+const QUANTILE = 57411
+const STDDEV = 57412
+const STDVAR = 57413
+const SUM = 57414
+const TOPK = 57415
+const LIMITK = 57416
+const LIMIT_RATIO = 57417
+const aggregatorsEnd = 57418
+const keywordsStart = 57419
+const BOOL = 57420
+const BY = 57421
+const GROUP_LEFT = 57422
+const GROUP_RIGHT = 57423
+const IGNORING = 57424
+const OFFSET = 57425
+const ON = 57426
+const WITHOUT = 57427
+const keywordsEnd = 57428
+const preprocessorStart = 57429
+const START = 57430
+const END = 57431
+const preprocessorEnd = 57432
+const counterResetHintsStart = 57433
+const UNKNOWN_COUNTER_RESET = 57434
+const COUNTER_RESET = 57435
+const NOT_COUNTER_RESET = 57436
+const GAUGE_TYPE = 57437
+const counterResetHintsEnd = 57438
+const startSymbolsStart = 57439
+const START_METRIC = 57440
+const START_SERIES_DESCRIPTION = 57441
+const START_EXPRESSION = 57442
+const START_METRIC_SELECTOR = 57443
+const startSymbolsEnd = 57444
 
 var yyToknames = [...]string{
 	"$end",
@@ -117,6 +151,8 @@ var yyToknames = [...]string{
 	"LEFT_BRACE",
 	"LEFT_BRACKET",
 	"LEFT_PAREN",
+	"OPEN_HIST",
+	"CLOSE_HIST",
 	"METRIC_IDENTIFIER",
 	"NUMBER",
 	"RIGHT_BRACE",
@@ -126,6 +162,19 @@ var yyToknames = [...]string{
 	"SPACE",
 	"STRING",
 	"TIMES",
+	"histogramDescStart",
+	"SUM_DESC",
+	"COUNT_DESC",
+	"SCHEMA_DESC",
+	"OFFSET_DESC",
+	"NEGATIVE_OFFSET_DESC",
+	"BUCKETS_DESC",
+	"NEGATIVE_BUCKETS_DESC",
+	"ZERO_BUCKET_DESC",
+	"ZERO_BUCKET_WIDTH_DESC",
+	"CUSTOM_VALUES_DESC",
+	"COUNTER_RESET_HINT_DESC",
+	"histogramDescEnd",
 	"operatorsStart",
 	"ADD",
 	"DIV",
@@ -144,6 +193,8 @@ var yyToknames = [...]string{
 	"NEQ_REGEX",
 	"POW",
 	"SUB",
+	"AT",
+	"ATAN2",
 	"operatorsEnd",
 	"aggregatorsStart",
 	"AVG",
@@ -158,6 +209,8 @@ var yyToknames = [...]string{
 	"STDVAR",
 	"SUM",
 	"TOPK",
+	"LIMITK",
+	"LIMIT_RATIO",
 	"aggregatorsEnd",
 	"keywordsStart",
 	"BOOL",
@@ -169,6 +222,16 @@ var yyToknames = [...]string{
 	"ON",
 	"WITHOUT",
 	"keywordsEnd",
+	"preprocessorStart",
+	"START",
+	"END",
+	"preprocessorEnd",
+	"counterResetHintsStart",
+	"UNKNOWN_COUNTER_RESET",
+	"COUNTER_RESET",
+	"NOT_COUNTER_RESET",
+	"GAUGE_TYPE",
+	"counterResetHintsEnd",
 	"startSymbolsStart",
 	"START_METRIC",
 	"START_SERIES_DESCRIPTION",
@@ -176,370 +239,450 @@ var yyToknames = [...]string{
 	"START_METRIC_SELECTOR",
 	"startSymbolsEnd",
 }
+
 var yyStatenames = [...]string{}
 
 const yyEofCode = 1
 const yyErrCode = 2
 const yyInitialStackSize = 16
 
-//line generated_parser.y:710
-
-//line yacctab:1
-var yyExca = [...]int{
+var yyExca = [...]int16{
 	-1, 1,
 	1, -1,
 	-2, 0,
-	-1, 33,
-	1, 121,
-	10, 121,
-	22, 121,
+	-1, 37,
+	1, 141,
+	10, 141,
+	24, 141,
 	-2, 0,
-	-1, 55,
-	2, 133,
-	15, 133,
-	60, 133,
-	66, 133,
-	-2, 89,
-	-1, 56,
-	2, 134,
-	15, 134,
-	60, 134,
-	66, 134,
-	-2, 90,
-	-1, 57,
-	2, 135,
-	15, 135,
-	60, 135,
-	66, 135,
-	-2, 92,
-	-1, 58,
-	2, 136,
-	15, 136,
-	60, 136,
-	66, 136,
-	-2, 93,
-	-1, 59,
-	2, 137,
-	15, 137,
-	60, 137,
-	66, 137,
-	-2, 94,
-	-1, 60,
-	2, 138,
-	15, 138,
-	60, 138,
-	66, 138,
-	-2, 99,
 	-1, 61,
-	2, 139,
-	15, 139,
-	60, 139,
-	66, 139,
-	-2, 101,
+	2, 184,
+	15, 184,
+	79, 184,
+	85, 184,
+	-2, 102,
 	-1, 62,
-	2, 140,
-	15, 140,
-	60, 140,
-	66, 140,
+	2, 185,
+	15, 185,
+	79, 185,
+	85, 185,
 	-2, 103,
 	-1, 63,
-	2, 141,
-	15, 141,
-	60, 141,
-	66, 141,
-	-2, 104,
-	-1, 64,
-	2, 142,
-	15, 142,
-	60, 142,
-	66, 142,
+	2, 186,
+	15, 186,
+	79, 186,
+	85, 186,
 	-2, 105,
-	-1, 65,
-	2, 143,
-	15, 143,
-	60, 143,
-	66, 143,
+	-1, 64,
+	2, 187,
+	15, 187,
+	79, 187,
+	85, 187,
 	-2, 106,
-	-1, 66,
-	2, 144,
-	15, 144,
-	60, 144,
-	66, 144,
+	-1, 65,
+	2, 188,
+	15, 188,
+	79, 188,
+	85, 188,
 	-2, 107,
-	-1, 176,
-	12, 184,
-	13, 184,
-	16, 184,
-	17, 184,
-	23, 184,
-	26, 184,
-	32, 184,
-	33, 184,
-	36, 184,
-	42, 184,
-	45, 184,
-	46, 184,
-	47, 184,
-	48, 184,
-	49, 184,
-	50, 184,
-	51, 184,
-	52, 184,
-	53, 184,
-	54, 184,
-	55, 184,
-	56, 184,
-	60, 184,
-	64, 184,
-	66, 184,
-	-2, 0,
-	-1, 177,
-	12, 184,
-	13, 184,
-	16, 184,
-	17, 184,
-	23, 184,
-	26, 184,
-	32, 184,
-	33, 184,
-	36, 184,
-	42, 184,
-	45, 184,
-	46, 184,
-	47, 184,
-	48, 184,
-	49, 184,
-	50, 184,
-	51, 184,
-	52, 184,
-	53, 184,
-	54, 184,
-	55, 184,
-	56, 184,
-	60, 184,
-	64, 184,
-	66, 184,
-	-2, 0,
-	-1, 193,
-	19, 182,
+	-1, 66,
+	2, 189,
+	15, 189,
+	79, 189,
+	85, 189,
+	-2, 112,
+	-1, 67,
+	2, 190,
+	15, 190,
+	79, 190,
+	85, 190,
+	-2, 114,
+	-1, 68,
+	2, 191,
+	15, 191,
+	79, 191,
+	85, 191,
+	-2, 116,
+	-1, 69,
+	2, 192,
+	15, 192,
+	79, 192,
+	85, 192,
+	-2, 117,
+	-1, 70,
+	2, 193,
+	15, 193,
+	79, 193,
+	85, 193,
+	-2, 118,
+	-1, 71,
+	2, 194,
+	15, 194,
+	79, 194,
+	85, 194,
+	-2, 119,
+	-1, 72,
+	2, 195,
+	15, 195,
+	79, 195,
+	85, 195,
+	-2, 120,
+	-1, 73,
+	2, 196,
+	15, 196,
+	79, 196,
+	85, 196,
+	-2, 124,
+	-1, 74,
+	2, 197,
+	15, 197,
+	79, 197,
+	85, 197,
+	-2, 125,
+	-1, 204,
+	9, 246,
+	12, 246,
+	13, 246,
+	18, 246,
+	19, 246,
+	25, 246,
+	41, 246,
+	47, 246,
+	48, 246,
+	51, 246,
+	57, 246,
+	62, 246,
+	63, 246,
+	64, 246,
+	65, 246,
+	66, 246,
+	67, 246,
+	68, 246,
+	69, 246,
+	70, 246,
+	71, 246,
+	72, 246,
+	73, 246,
+	74, 246,
+	75, 246,
+	79, 246,
+	83, 246,
+	85, 246,
+	88, 246,
+	89, 246,
 	-2, 0,
-	-1, 241,
-	19, 183,
+	-1, 205,
+	9, 246,
+	12, 246,
+	13, 246,
+	18, 246,
+	19, 246,
+	25, 246,
+	41, 246,
+	47, 246,
+	48, 246,
+	51, 246,
+	57, 246,
+	62, 246,
+	63, 246,
+	64, 246,
+	65, 246,
+	66, 246,
+	67, 246,
+	68, 246,
+	69, 246,
+	70, 246,
+	71, 246,
+	72, 246,
+	73, 246,
+	74, 246,
+	75, 246,
+	79, 246,
+	83, 246,
+	85, 246,
+	88, 246,
+	89, 246,
 	-2, 0,
 }
 
 const yyPrivate = 57344
 
-const yyLast = 598
-
-var yyAct = [...]int{
-
-	247, 197, 35, 136, 237, 238, 168, 169, 108, 74,
-	97, 96, 99, 174, 121, 175, 98, 250, 100, 176,
-	177, 230, 95, 54, 231, 229, 171, 48, 69, 101,
-	50, 22, 49, 163, 245, 148, 251, 225, 51, 244,
-	116, 67, 172, 6, 248, 170, 228, 18, 19, 92,
-	224, 20, 243, 103, 162, 104, 69, 68, 117, 102,
-	55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
-	65, 66, 94, 95, 99, 13, 101, 105, 31, 24,
-	100, 30, 7, 252, 8, 79, 80, 81, 33, 82,
-	83, 84, 85, 86, 87, 88, 89, 90, 91, 139,
-	92, 93, 145, 78, 149, 143, 146, 141, 110, 142,
-	2, 3, 4, 5, 242, 144, 32, 115, 109, 114,
-	173, 138, 161, 94, 226, 178, 179, 180, 181, 182,
-	183, 184, 185, 186, 187, 188, 189, 190, 191, 122,
-	123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
-	133, 134, 135, 153, 46, 140, 10, 137, 152, 1,
-	70, 227, 138, 155, 138, 156, 71, 240, 45, 151,
-	34, 95, 48, 69, 232, 50, 22, 49, 233, 234,
-	235, 236, 239, 51, 80, 53, 67, 194, 9, 9,
-	158, 193, 18, 19, 89, 90, 20, 241, 92, 44,
-	157, 159, 68, 43, 192, 55, 56, 57, 58, 59,
-	60, 61, 62, 63, 64, 65, 66, 42, 165, 76,
-	13, 94, 120, 41, 24, 167, 30, 40, 246, 75,
-	170, 39, 249, 48, 69, 160, 50, 22, 49, 171,
-	113, 118, 110, 147, 51, 112, 254, 67, 38, 76,
-	119, 255, 109, 18, 19, 172, 111, 20, 107, 75,
-	37, 36, 166, 68, 77, 73, 55, 56, 57, 58,
-	59, 60, 61, 62, 63, 64, 65, 66, 199, 164,
-	195, 13, 72, 52, 198, 24, 154, 30, 209, 47,
-	106, 0, 215, 0, 0, 0, 253, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 211, 212,
-	0, 0, 213, 0, 0, 0, 0, 0, 0, 0,
-	0, 200, 202, 204, 205, 206, 214, 216, 219, 220,
-	221, 222, 223, 199, 0, 201, 203, 207, 208, 210,
-	217, 218, 0, 209, 0, 0, 0, 215, 0, 0,
-	0, 196, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 211, 212, 0, 0, 213, 0, 0,
-	0, 0, 0, 0, 0, 0, 200, 202, 204, 205,
-	206, 214, 216, 219, 220, 221, 222, 223, 0, 0,
-	201, 203, 207, 208, 210, 217, 218, 17, 69, 0,
-	0, 22, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 18, 19, 0,
-	0, 20, 17, 31, 0, 0, 22, 0, 0, 0,
-	11, 12, 14, 15, 16, 21, 23, 25, 26, 27,
-	28, 29, 18, 19, 0, 13, 20, 0, 0, 24,
-	0, 30, 0, 0, 0, 11, 12, 14, 15, 16,
-	21, 23, 25, 26, 27, 28, 29, 95, 0, 0,
-	13, 0, 0, 150, 24, 0, 30, 0, 0, 79,
-	80, 81, 0, 82, 83, 84, 85, 86, 87, 88,
-	89, 90, 91, 0, 92, 93, 0, 0, 95, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	79, 80, 81, 0, 82, 83, 84, 94, 86, 87,
-	88, 89, 90, 91, 0, 92, 93, 0, 0, 95,
+const yyLast = 803
+
+var yyAct = [...]int16{
+	154, 338, 336, 157, 343, 230, 39, 196, 280, 44,
+	295, 294, 84, 120, 82, 233, 180, 109, 108, 350,
+	351, 352, 353, 110, 111, 243, 202, 158, 203, 135,
+	112, 249, 361, 6, 333, 329, 113, 332, 232, 204,
+	205, 308, 271, 60, 130, 270, 297, 268, 162, 315,
+	156, 360, 153, 306, 359, 344, 200, 162, 161, 55,
+	245, 246, 222, 115, 247, 116, 107, 161, 269, 54,
+	267, 114, 260, 306, 182, 234, 236, 238, 239, 240,
+	248, 250, 253, 254, 255, 256, 257, 261, 262, 163,
+	122, 235, 237, 241, 242, 244, 251, 252, 192, 328,
+	111, 258, 259, 117, 190, 164, 112, 152, 103, 55,
+	106, 337, 77, 113, 184, 151, 35, 165, 327, 54,
+	175, 191, 169, 172, 183, 185, 167, 189, 168, 2,
+	3, 4, 5, 107, 198, 105, 159, 160, 201, 186,
+	188, 7, 326, 206, 207, 208, 209, 210, 211, 212,
+	213, 214, 215, 216, 217, 218, 219, 220, 199, 194,
+	89, 91, 221, 162, 264, 325, 197, 223, 224, 171,
+	200, 100, 101, 161, 162, 103, 104, 106, 90, 263,
+	233, 324, 170, 162, 161, 323, 362, 322, 321, 274,
+	243, 122, 266, 161, 131, 163, 249, 272, 123, 320,
+	229, 319, 105, 232, 275, 318, 163, 317, 121, 85,
+	316, 164, 163, 292, 293, 163, 265, 296, 129, 83,
+	276, 86, 164, 273, 10, 245, 246, 187, 164, 247,
+	88, 164, 86, 50, 79, 36, 298, 260, 1, 78,
+	234, 236, 238, 239, 240, 248, 250, 253, 254, 255,
+	256, 257, 261, 262, 123, 49, 235, 237, 241, 242,
+	244, 251, 252, 181, 121, 182, 258, 259, 128, 48,
+	127, 304, 119, 305, 307, 59, 309, 86, 9, 9,
+	47, 46, 134, 310, 311, 136, 137, 138, 139, 140,
+	141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
+	45, 43, 132, 173, 179, 184, 166, 85, 330, 178,
+	331, 42, 133, 55, 41, 183, 185, 83, 339, 340,
+	341, 335, 177, 54, 342, 81, 346, 345, 348, 347,
+	86, 303, 40, 314, 354, 355, 302, 55, 51, 356,
+	53, 77, 300, 56, 195, 358, 22, 54, 313, 55,
+	174, 301, 227, 57, 8, 312, 226, 357, 37, 54,
+	363, 299, 126, 277, 87, 193, 228, 125, 80, 75,
+	349, 225, 155, 58, 231, 18, 19, 52, 118, 20,
+	124, 0, 0, 0, 0, 76, 0, 0, 0, 0,
+	61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+	71, 72, 73, 74, 0, 0, 0, 13, 0, 0,
+	0, 24, 0, 30, 0, 0, 31, 32, 55, 38,
+	107, 53, 77, 0, 56, 279, 0, 22, 54, 0,
+	0, 0, 278, 0, 57, 0, 282, 283, 281, 288,
+	290, 287, 289, 284, 285, 286, 291, 0, 91, 0,
+	75, 0, 0, 0, 0, 0, 18, 19, 100, 101,
+	20, 0, 103, 0, 106, 90, 76, 0, 0, 0,
+	0, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+	70, 71, 72, 73, 74, 0, 0, 0, 13, 105,
+	0, 0, 24, 0, 30, 0, 55, 31, 32, 53,
+	77, 0, 56, 334, 0, 22, 54, 0, 0, 0,
+	0, 0, 57, 0, 282, 283, 281, 288, 290, 287,
+	289, 284, 285, 286, 291, 0, 0, 0, 75, 0,
+	0, 0, 0, 0, 18, 19, 0, 0, 20, 0,
+	0, 0, 17, 77, 76, 0, 0, 0, 22, 61,
+	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+	72, 73, 74, 0, 0, 0, 13, 0, 0, 0,
+	24, 0, 30, 0, 0, 31, 32, 18, 19, 0,
+	0, 20, 0, 0, 0, 17, 35, 0, 0, 0,
+	0, 22, 11, 12, 14, 15, 16, 21, 23, 25,
+	26, 27, 28, 29, 33, 34, 0, 0, 0, 13,
+	0, 0, 0, 24, 0, 30, 0, 0, 31, 32,
+	18, 19, 0, 0, 20, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 11, 12, 14, 15, 16,
+	21, 23, 25, 26, 27, 28, 29, 33, 34, 107,
+	0, 0, 13, 0, 0, 0, 24, 176, 30, 0,
+	0, 31, 32, 0, 0, 0, 0, 0, 107, 0,
+	0, 0, 0, 0, 0, 0, 89, 91, 92, 0,
+	93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+	0, 103, 104, 106, 90, 89, 91, 92, 0, 93,
+	94, 95, 96, 97, 98, 99, 100, 101, 102, 0,
+	103, 104, 106, 90, 107, 0, 0, 0, 105, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 79, 80, 81, 0, 82, 83, 95, 94, 86,
-	87, 0, 89, 90, 91, 0, 92, 93, 0, 79,
-	80, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	89, 90, 0, 0, 92, 93, 0, 0, 0, 94,
+	0, 0, 0, 107, 0, 0, 0, 105, 0, 0,
+	0, 89, 91, 92, 0, 93, 94, 95, 0, 97,
+	98, 99, 100, 101, 102, 0, 103, 104, 106, 90,
+	89, 91, 92, 0, 93, 94, 0, 0, 97, 98,
+	0, 100, 101, 102, 0, 103, 104, 106, 90, 0,
+	0, 0, 0, 105, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 94,
+	0, 0, 105,
 }
-var yyPact = [...]int{
 
-	41, 72, 410, 410, 160, 385, -1000, -1000, -1000, 65,
+var yyPact = [...]int16{
+	31, 131, 573, 573, 409, 530, -1000, -1000, -1000, 103,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, 305, -1000, 228, -1000, 654,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, 21, 98, -1000, -1000, 487, -1000, 487, 99,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-	-1000, 247, -1000, 101, -1000, 59, -1000, -1000, -1000, -1000,
-	-1000, -1000, -1000, -1000, -1000, -1000, -1000, 14, 61, -1000,
-	221, -1000, 221, 43, -1000, -1000, -1000, -1000, -1000, -1000,
-	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 240,
-	-1000, -1000, 238, -1000, -1000, 115, -1000, 18, -1000, -45,
-	-45, -45, -45, -45, -45, -45, -45, -45, -45, -45,
-	-45, -45, -45, -45, 155, 153, 61, -48, -1000, 100,
-	100, 15, -1000, 453, 8, -1000, 151, -1000, -1000, 161,
-	-1000, -1000, 217, -1000, 31, -1000, 213, 221, -1000, -50,
-	-42, -1000, 221, 221, 221, 221, 221, 221, 221, 221,
-	221, 221, 221, 221, 221, 221, -1000, -1000, -1000, 185,
-	-1000, -1000, -1000, -1000, 331, -1000, -1000, 30, -1000, 59,
-	-1000, -1000, 106, -1000, 23, -1000, -1000, -1000, -1000, -1000,
-	-1000, -1000, -1000, -1000, -1000, -3, 0, -1000, -1000, -1000,
-	-1000, 28, 28, 157, 100, 100, 100, 100, 8, 533,
-	533, 533, 515, 484, 533, 533, 515, 8, 8, 533,
-	8, 157, -1000, 112, -1000, 32, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, 252, -1000, -1000,
+	360, -1000, -1000, 266, 214, -1000, -1000, 20, -1000, -49,
+	-49, -49, -49, -49, -49, -49, -49, -49, -49, -49,
+	-49, -49, -49, -49, -49, 50, 48, 304, 98, -55,
+	-1000, 167, 167, 328, -1000, 635, 52, -1000, 302, -1000,
+	-1000, 261, 70, -1000, -1000, 207, -1000, 102, -1000, 96,
+	154, 487, -1000, -56, -41, -1000, 487, 487, 487, 487,
+	487, 487, 487, 487, 487, 487, 487, 487, 487, 487,
+	487, -1000, 100, -1000, -1000, 47, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, 39, 39, 350, -1000, -1000, -1000, -1000,
+	178, -1000, -1000, 157, -1000, 654, -1000, -1000, 196, -1000,
+	45, -1000, -1000, -1000, -1000, -1000, 43, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, 16, 171, 163, -1000, -1000, -1000,
+	408, 406, 167, 167, 167, 167, 52, 52, 119, 119,
+	119, 719, 700, 119, 119, 719, 52, 52, 119, 52,
+	406, -1000, 24, -1000, -1000, -1000, 340, -1000, 329, -1000,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-	-1000, -1000, -1000, -1000, -1000, 221, -1000, -1000, -1000, -1000,
-	27, 27, -7, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-	17, 81, -1000, -1000, 276, -1000, 59, -1000, -1000, -1000,
-	27, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, 487, -1000, -1000, -1000, -1000, -1000,
+	-1000, 34, 34, 15, 34, 40, 40, 331, 32, -1000,
+	-1000, 204, 201, 199, 195, 193, 182, 181, 179, 175,
+	159, 136, -1000, -1000, -1000, -1000, -1000, -1000, 97, -1000,
+	-1000, -1000, 13, -1000, 654, -1000, -1000, -1000, 34, -1000,
+	11, 8, 486, -1000, -1000, -1000, 54, 174, 174, 174,
+	39, 41, 41, 54, 41, 54, -73, -1000, -1000, -1000,
+	-1000, -1000, 34, 34, -1000, -1000, -1000, 34, -1000, -1000,
+	-1000, -1000, -1000, -1000, 174, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, -1000, 165,
+	-1000, -1000, -1000, -1000,
 }
-var yyPgo = [...]int{
 
-	0, 290, 8, 289, 1, 286, 284, 185, 283, 156,
-	282, 84, 9, 280, 5, 4, 279, 264, 0, 6,
-	262, 7, 261, 11, 58, 260, 250, 2, 248, 243,
-	10, 241, 23, 231, 227, 223, 222, 217, 203, 199,
-	168, 154, 3, 167, 159, 116,
+var yyPgo = [...]int16{
+	0, 378, 13, 377, 5, 16, 374, 275, 373, 372,
+	12, 370, 224, 354, 368, 14, 366, 10, 11, 365,
+	364, 7, 363, 8, 4, 357, 2, 1, 3, 344,
+	27, 0, 338, 332, 18, 194, 314, 312, 6, 311,
+	303, 17, 302, 43, 301, 9, 300, 282, 281, 280,
+	269, 255, 233, 238, 235,
 }
-var yyR1 = [...]int{
-
-	0, 44, 44, 44, 44, 44, 44, 44, 27, 27,
-	27, 27, 27, 27, 27, 27, 27, 27, 27, 22,
-	22, 22, 22, 23, 23, 25, 25, 25, 25, 25,
-	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
-	24, 26, 26, 36, 36, 31, 31, 31, 31, 14,
-	14, 14, 14, 13, 13, 13, 4, 4, 28, 30,
-	30, 29, 29, 29, 37, 35, 35, 33, 39, 39,
-	39, 39, 39, 40, 41, 41, 41, 32, 32, 32,
-	1, 1, 1, 2, 2, 2, 2, 11, 11, 7,
+
+var yyR1 = [...]int8{
+	0, 53, 53, 53, 53, 53, 53, 53, 38, 38,
+	38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
+	33, 33, 33, 33, 34, 34, 36, 36, 36, 36,
+	36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+	36, 36, 35, 37, 37, 47, 47, 42, 42, 42,
+	42, 17, 17, 17, 17, 16, 16, 16, 4, 4,
+	4, 39, 41, 41, 40, 40, 40, 48, 46, 46,
+	46, 32, 32, 32, 9, 9, 44, 50, 50, 50,
+	50, 50, 50, 51, 52, 52, 52, 43, 43, 43,
+	1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+	13, 13, 7, 7, 7, 7, 7, 7, 7, 7,
 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 9,
-	9, 9, 9, 10, 10, 10, 12, 12, 12, 12,
-	45, 17, 17, 17, 17, 16, 16, 16, 16, 16,
-	20, 20, 20, 3, 3, 3, 3, 3, 3, 3,
-	3, 3, 3, 3, 3, 6, 6, 6, 6, 6,
+	7, 7, 7, 7, 7, 7, 12, 12, 12, 12,
+	14, 14, 14, 15, 15, 15, 15, 15, 15, 15,
+	54, 20, 20, 20, 20, 19, 19, 19, 19, 19,
+	19, 19, 19, 19, 29, 29, 29, 21, 21, 21,
+	21, 22, 22, 22, 23, 23, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 24, 24, 25, 25, 25,
+	11, 11, 11, 11, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 6, 6,
 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-	6, 6, 6, 6, 6, 6, 6, 6, 6, 8,
-	8, 5, 5, 5, 5, 34, 19, 21, 21, 18,
-	42, 38, 43, 43, 15, 15,
+	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 8, 8, 5,
+	5, 5, 5, 45, 45, 28, 28, 30, 30, 31,
+	31, 27, 26, 26, 49, 10, 18, 18,
 }
-var yyR2 = [...]int{
 
+var yyR2 = [...]int8{
 	0, 2, 2, 2, 2, 2, 2, 1, 1, 1,
-	1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
-	3, 2, 2, 2, 2, 4, 4, 4, 4, 4,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	3, 3, 2, 2, 2, 2, 4, 4, 4, 4,
 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-	1, 0, 1, 3, 3, 1, 1, 3, 3, 3,
-	4, 2, 1, 3, 1, 2, 1, 1, 2, 3,
-	2, 3, 1, 2, 3, 3, 3, 4, 6, 6,
+	4, 4, 1, 0, 1, 3, 3, 1, 1, 3,
+	3, 3, 4, 2, 1, 3, 1, 2, 1, 1,
+	1, 2, 3, 2, 3, 1, 2, 3, 3, 4,
+	3, 3, 5, 3, 1, 1, 4, 6, 5, 6,
 	5, 4, 3, 2, 2, 1, 1, 3, 4, 2,
-	3, 1, 2, 3, 3, 2, 1, 2, 1, 1,
+	3, 1, 2, 3, 3, 1, 3, 3, 2, 1,
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-	1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
-	4, 2, 0, 3, 1, 2, 3, 3, 2, 1,
+	1, 1, 1, 1, 1, 1, 3, 4, 2, 0,
+	3, 1, 2, 3, 3, 1, 3, 3, 2, 1,
 	2, 0, 3, 2, 1, 1, 3, 1, 3, 4,
+	1, 3, 5, 5, 1, 1, 1, 4, 3, 3,
+	2, 3, 1, 2, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 4, 3, 3, 1, 2,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 2, 2, 1,
-	1, 1, 0, 1, 0, 1,
+	1, 1, 2, 1, 1, 1, 0, 1,
 }
-var yyChk = [...]int{
-
-	-1000, -44, 69, 70, 71, 72, 2, 10, -11, -7,
-	-9, 45, 46, 60, 47, 48, 49, 12, 32, 33,
-	36, 50, 16, 51, 64, 52, 53, 54, 55, 56,
-	66, 13, -45, -11, 10, -27, -22, -25, -28, -33,
-	-34, -35, -37, -38, -39, -40, -41, -3, 12, 17,
-	15, 23, -8, -7, -32, 45, 46, 47, 48, 49,
-	50, 51, 52, 53, 54, 55, 56, 26, 42, 13,
-	-41, -9, -10, 18, -12, 12, 2, -17, 2, 26,
-	27, 28, 30, 31, 32, 33, 34, 35, 36, 37,
-	38, 39, 41, 42, 64, 14, -23, -30, 2, 60,
-	66, 15, -30, -27, -27, -32, -1, 18, -2, 12,
-	2, 18, 7, 2, 4, 2, 22, -24, -31, -26,
-	-36, 59, -24, -24, -24, -24, -24, -24, -24, -24,
-	-24, -24, -24, -24, -24, -24, -42, 2, 9, -42,
-	2, -30, -23, -14, 15, 2, -14, -29, 20, -27,
-	20, 18, 7, 2, -5, 2, 4, 39, 29, 40,
-	18, -12, 23, 2, -16, 5, -20, 12, -19, -21,
-	17, 26, 42, -27, 63, 65, 61, 62, -27, -27,
-	-27, -27, -27, -27, -27, -27, -27, -27, -27, -27,
-	-27, -27, 19, 6, 2, -13, 20, -4, -6, 2,
-	45, 59, 46, 60, 47, 48, 49, 61, 62, 12,
-	63, 32, 33, 36, 50, 16, 51, 64, 65, 52,
-	53, 54, 55, 56, 20, 7, 18, -2, 23, 2,
-	24, 24, -21, -19, -19, -14, -14, -15, -14, -15,
-	-43, -42, 2, 20, 7, 2, -27, -18, 17, -18,
-	24, 19, 2, 20, -4, -18,
+
+var yyChk = [...]int16{
+	-1000, -53, 98, 99, 100, 101, 2, 10, -13, -7,
+	-12, 62, 63, 79, 64, 65, 66, 12, 47, 48,
+	51, 67, 18, 68, 83, 69, 70, 71, 72, 73,
+	85, 88, 89, 74, 75, 13, -54, -13, 10, -38,
+	-33, -36, -39, -44, -45, -46, -48, -49, -50, -51,
+	-52, -32, -3, 12, 19, 9, 15, 25, -8, -7,
+	-43, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+	71, 72, 73, 74, 75, 41, 57, 13, -52, -12,
+	-14, 20, -15, 12, -10, 2, 25, -20, 2, 41,
+	59, 42, 43, 45, 46, 47, 48, 49, 50, 51,
+	52, 53, 54, 56, 57, 83, 58, 14, -34, -41,
+	2, 79, 85, 15, -41, -38, -38, -43, -1, 20,
+	-2, 12, -10, 2, 20, 7, 2, 4, 2, 4,
+	24, -35, -42, -37, -47, 78, -35, -35, -35, -35,
+	-35, -35, -35, -35, -35, -35, -35, -35, -35, -35,
+	-35, -45, 57, 2, -31, -9, 2, -28, -30, 88,
+	89, 19, 9, 41, 57, -45, 2, -41, -34, -17,
+	15, 2, -17, -40, 22, -38, 22, 20, 7, 2,
+	-5, 2, 4, 54, 44, 55, -5, 20, -15, 25,
+	2, 25, 2, -19, 5, -29, -21, 12, -28, -30,
+	16, -38, 82, 84, 80, 81, -38, -38, -38, -38,
+	-38, -38, -38, -38, -38, -38, -38, -38, -38, -38,
+	-38, -45, 15, -28, -28, 21, 6, 2, -16, 22,
+	-4, -6, 25, 2, 62, 78, 63, 79, 64, 65,
+	66, 80, 81, 12, 82, 47, 48, 51, 67, 18,
+	68, 83, 84, 69, 70, 71, 72, 73, 88, 89,
+	59, 74, 75, 22, 7, 20, -2, 25, 2, 25,
+	2, 26, 26, -30, 26, 41, 57, -22, 24, 17,
+	-23, 30, 28, 29, 35, 36, 37, 33, 31, 34,
+	32, 38, -17, -17, -18, -17, -18, 22, -45, 21,
+	2, 22, 7, 2, -38, -27, 19, -27, 26, -27,
+	-21, -21, 24, 17, 2, 17, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 21, 2, 22,
+	-4, -27, 26, 26, 17, -23, -26, 57, -27, -31,
+	-31, -31, -28, -24, 14, -24, -26, -24, -26, -11,
+	92, 93, 94, 95, -27, -27, -27, -25, -31, 24,
+	21, 2, 21, -31,
 }
-var yyDef = [...]int{
-
-	0, -2, 112, 112, 0, 0, 7, 6, 1, 112,
-	88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
-	98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
-	108, 0, 2, -2, 3, 4, 8, 9, 10, 11,
-	12, 13, 14, 15, 16, 17, 18, 0, 95, 175,
-	0, 181, 0, 75, 76, -2, -2, -2, -2, -2,
-	-2, -2, -2, -2, -2, -2, -2, 169, 170, 0,
-	5, 87, 0, 111, 114, 0, 119, 120, 124, 41,
-	41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
-	41, 41, 41, 41, 0, 0, 0, 21, 22, 0,
-	0, 0, 58, 0, 73, 74, 0, 79, 81, 0,
-	86, 109, 0, 115, 0, 118, 123, 0, 40, 45,
-	46, 42, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 65, 66, 180, 0,
-	72, 19, 20, 23, 0, 52, 24, 0, 60, 62,
-	64, 77, 0, 82, 0, 85, 171, 172, 173, 174,
-	110, 113, 116, 117, 122, 125, 127, 130, 131, 132,
-	176, 0, 0, 25, 0, 0, -2, -2, 26, 27,
-	28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
-	38, 39, 67, -2, 71, 0, 51, 54, 56, 57,
-	145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
-	155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
-	165, 166, 167, 168, 59, 63, 78, 80, 83, 84,
-	0, 0, 0, 177, 178, 43, 44, 47, 185, 48,
-	0, -2, 70, 49, 0, 55, 61, 126, 179, 128,
-	0, 68, 69, 50, 53, 129,
+
+var yyDef = [...]int16{
+	0, -2, 129, 129, 0, 0, 7, 6, 1, 129,
+	101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+	111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+	121, 122, 123, 124, 125, 0, 2, -2, 3, 4,
+	8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+	18, 19, 0, 108, 233, 234, 0, 244, 0, 85,
+	86, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+	-2, -2, -2, -2, -2, 227, 228, 0, 5, 100,
+	0, 128, 131, 0, 135, 139, 245, 140, 144, 43,
+	43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
+	43, 43, 43, 43, 43, 0, 0, 0, 0, 22,
+	23, 0, 0, 0, 61, 0, 83, 84, 0, 89,
+	91, 0, 95, 99, 126, 0, 132, 0, 138, 0,
+	143, 0, 42, 47, 48, 44, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 68, 0, 70, 71, 0, 73, 239, 240, 74,
+	75, 235, 236, 0, 0, 0, 82, 20, 21, 24,
+	0, 54, 25, 0, 63, 65, 67, 87, 0, 92,
+	0, 98, 229, 230, 231, 232, 0, 127, 130, 133,
+	136, 134, 137, 142, 145, 147, 150, 154, 155, 156,
+	0, 26, 0, 0, -2, -2, 27, 28, 29, 30,
+	31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+	41, 69, 0, 237, 238, 76, 0, 81, 0, 53,
+	56, 58, 59, 60, 198, 199, 200, 201, 202, 203,
+	204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
+	214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+	224, 225, 226, 62, 66, 88, 90, 93, 97, 94,
+	96, 0, 0, 0, 0, 0, 0, 0, 0, 160,
+	162, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 45, 46, 49, 247, 50, 72, 0, 78,
+	80, 51, 0, 57, 64, 146, 241, 148, 0, 151,
+	0, 0, 0, 158, 163, 159, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 77, 79, 52,
+	55, 149, 0, 0, 157, 161, 164, 0, 243, 165,
+	166, 167, 168, 169, 0, 170, 171, 172, 173, 174,
+	180, 181, 182, 183, 152, 153, 242, 0, 178, 0,
+	176, 179, 175, 177,
 }
-var yyTok1 = [...]int{
 
+var yyTok1 = [...]int8{
 	1,
 }
-var yyTok2 = [...]int{
 
+var yyTok2 = [...]int8{
 	2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
 	12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
 	22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
@@ -547,9 +690,13 @@ var yyTok2 = [...]int{
 	42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
 	52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
 	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
-	72, 73,
+	72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+	82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+	92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+	102,
 }
-var yyTok3 = [...]int{
+
+var yyTok3 = [...]int8{
 	0,
 }
 
@@ -559,8 +706,6 @@ var yyErrorMessages = [...]struct {
 	msg   string
 }{}
 
-//line yaccpar:1
-
 /*	parser for yacc output	*/
 
 var (
@@ -631,9 +776,9 @@ func yyErrorMessage(state, lookAhead int) string {
 	expected := make([]int, 0, 4)
 
 	// Look for shiftable tokens.
-	base := yyPact[state]
+	base := int(yyPact[state])
 	for tok := TOKSTART; tok-1 < len(yyToknames); tok++ {
-		if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok {
+		if n := base + tok; n >= 0 && n < yyLast && int(yyChk[int(yyAct[n])]) == tok {
 			if len(expected) == cap(expected) {
 				return res
 			}
@@ -643,13 +788,13 @@ func yyErrorMessage(state, lookAhead int) string {
 
 	if yyDef[state] == -2 {
 		i := 0
-		for yyExca[i] != -1 || yyExca[i+1] != state {
+		for yyExca[i] != -1 || int(yyExca[i+1]) != state {
 			i += 2
 		}
 
 		// Look for tokens that we accept or reduce.
 		for i += 2; yyExca[i] >= 0; i += 2 {
-			tok := yyExca[i]
+			tok := int(yyExca[i])
 			if tok < TOKSTART || yyExca[i+1] == 0 {
 				continue
 			}
@@ -680,30 +825,30 @@ func yylex1(lex yyLexer, lval *yySymType) (char, token int) {
 	token = 0
 	char = lex.Lex(lval)
 	if char <= 0 {
-		token = yyTok1[0]
+		token = int(yyTok1[0])
 		goto out
 	}
 	if char < len(yyTok1) {
-		token = yyTok1[char]
+		token = int(yyTok1[char])
 		goto out
 	}
 	if char >= yyPrivate {
 		if char < yyPrivate+len(yyTok2) {
-			token = yyTok2[char-yyPrivate]
+			token = int(yyTok2[char-yyPrivate])
 			goto out
 		}
 	}
 	for i := 0; i < len(yyTok3); i += 2 {
-		token = yyTok3[i+0]
+		token = int(yyTok3[i+0])
 		if token == char {
-			token = yyTok3[i+1]
+			token = int(yyTok3[i+1])
 			goto out
 		}
 	}
 
 out:
 	if token == 0 {
-		token = yyTok2[1] /* unknown char */
+		token = int(yyTok2[1]) /* unknown char */
 	}
 	if yyDebug >= 3 {
 		__yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
@@ -758,7 +903,7 @@ yystack:
 	yyS[yyp].yys = yystate
 
 yynewstate:
-	yyn = yyPact[yystate]
+	yyn = int(yyPact[yystate])
 	if yyn <= yyFlag {
 		goto yydefault /* simple state */
 	}
@@ -769,8 +914,8 @@ yynewstate:
 	if yyn < 0 || yyn >= yyLast {
 		goto yydefault
 	}
-	yyn = yyAct[yyn]
-	if yyChk[yyn] == yytoken { /* valid shift */
+	yyn = int(yyAct[yyn])
+	if int(yyChk[yyn]) == yytoken { /* valid shift */
 		yyrcvr.char = -1
 		yytoken = -1
 		yyVAL = yyrcvr.lval
@@ -783,7 +928,7 @@ yynewstate:
 
 yydefault:
 	/* default state action */
-	yyn = yyDef[yystate]
+	yyn = int(yyDef[yystate])
 	if yyn == -2 {
 		if yyrcvr.char < 0 {
 			yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval)
@@ -792,18 +937,18 @@ yydefault:
 		/* look through exception table */
 		xi := 0
 		for {
-			if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+			if yyExca[xi+0] == -1 && int(yyExca[xi+1]) == yystate {
 				break
 			}
 			xi += 2
 		}
 		for xi += 2; ; xi += 2 {
-			yyn = yyExca[xi+0]
+			yyn = int(yyExca[xi+0])
 			if yyn < 0 || yyn == yytoken {
 				break
 			}
 		}
-		yyn = yyExca[xi+1]
+		yyn = int(yyExca[xi+1])
 		if yyn < 0 {
 			goto ret0
 		}
@@ -825,10 +970,10 @@ yydefault:
 
 			/* find a state where "error" is a legal shift action */
 			for yyp >= 0 {
-				yyn = yyPact[yyS[yyp].yys] + yyErrCode
+				yyn = int(yyPact[yyS[yyp].yys]) + yyErrCode
 				if yyn >= 0 && yyn < yyLast {
-					yystate = yyAct[yyn] /* simulate a shift of "error" */
-					if yyChk[yystate] == yyErrCode {
+					yystate = int(yyAct[yyn]) /* simulate a shift of "error" */
+					if int(yyChk[yystate]) == yyErrCode {
 						goto yystack
 					}
 				}
@@ -864,7 +1009,7 @@ yydefault:
 	yypt := yyp
 	_ = yypt // guard against "declared and not used"
 
-	yyp -= yyR2[yyn]
+	yyp -= int(yyR2[yyn])
 	// yyp is now the index of $0. Perform the default action. Iff the
 	// reduced production is ε, $1 is possibly out of range.
 	if yyp+1 >= len(yyS) {
@@ -875,16 +1020,16 @@ yydefault:
 	yyVAL = yyS[yyp+1]
 
 	/* consult goto table to find next state */
-	yyn = yyR1[yyn]
-	yyg := yyPgo[yyn]
+	yyn = int(yyR1[yyn])
+	yyg := int(yyPgo[yyn])
 	yyj := yyg + yyS[yyp].yys + 1
 
 	if yyj >= yyLast {
-		yystate = yyAct[yyg]
+		yystate = int(yyAct[yyg])
 	} else {
-		yystate = yyAct[yyj]
-		if yyChk[yystate] != -yyn {
-			yystate = yyAct[yyg]
+		yystate = int(yyAct[yyj])
+		if int(yyChk[yystate]) != -yyn {
+			yystate = int(yyAct[yyg])
 		}
 	}
 	// dummy call; replaced with literal code
@@ -892,408 +1037,413 @@ yydefault:
 
 	case 1:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:165
 		{
 			yylex.(*parser).generatedParserResult = yyDollar[2].labels
 		}
 	case 3:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:168
 		{
-			yylex.(*parser).addParseErrf(PositionRange{}, "no expression found in input")
+			yylex.(*parser).addParseErrf(posrange.PositionRange{}, "no expression found in input")
 		}
 	case 4:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:170
 		{
 			yylex.(*parser).generatedParserResult = yyDollar[2].node
 		}
 	case 5:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:172
 		{
 			yylex.(*parser).generatedParserResult = yyDollar[2].node
 		}
 	case 7:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:175
 		{
 			yylex.(*parser).unexpected("", "")
 		}
-	case 19:
+	case 20:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:197
 		{
 			yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node)
 		}
-	case 20:
+	case 21:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:199
 		{
 			yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node)
 		}
-	case 21:
+	case 22:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:201
 		{
 			yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, yyDollar[2].node)
 		}
-	case 22:
+	case 23:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:203
 		{
 			yylex.(*parser).unexpected("aggregation", "")
 			yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, Expressions{})
 		}
-	case 23:
+	case 24:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:211
 		{
 			yyVAL.node = &AggregateExpr{
 				Grouping: yyDollar[2].strings,
 			}
 		}
-	case 24:
+	case 25:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:217
 		{
 			yyVAL.node = &AggregateExpr{
 				Grouping: yyDollar[2].strings,
 				Without:  true,
 			}
 		}
-	case 25:
-		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:230
-		{
-			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
-		}
 	case 26:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:231
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 27:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:232
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 28:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:233
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 29:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:234
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 30:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:235
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 31:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:236
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 32:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:237
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 33:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:238
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 34:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:239
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 35:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:240
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 36:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:241
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 37:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:242
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 38:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:243
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 39:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:244
+		{
+			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
+		}
+	case 40:
+		yyDollar = yyS[yypt-4 : yypt+1]
 		{
 			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
 		}
 	case 41:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		{
+			yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
+		}
+	case 43:
 		yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:252
 		{
 			yyVAL.node = &BinaryExpr{
 				VectorMatching: &VectorMatching{Card: CardOneToOne},
 			}
 		}
-	case 42:
+	case 44:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:257
 		{
 			yyVAL.node = &BinaryExpr{
 				VectorMatching: &VectorMatching{Card: CardOneToOne},
 				ReturnBool:     true,
 			}
 		}
-	case 43:
+	case 45:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:265
 		{
 			yyVAL.node = yyDollar[1].node
 			yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings
 		}
-	case 44:
+	case 46:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:270
 		{
 			yyVAL.node = yyDollar[1].node
 			yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings
 			yyVAL.node.(*BinaryExpr).VectorMatching.On = true
 		}
-	case 47:
+	case 49:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:280
 		{
 			yyVAL.node = yyDollar[1].node
 			yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne
 			yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings
 		}
-	case 48:
+	case 50:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:286
 		{
 			yyVAL.node = yyDollar[1].node
 			yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany
 			yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings
 		}
-	case 49:
+	case 51:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:295
 		{
 			yyVAL.strings = yyDollar[2].strings
 		}
-	case 50:
+	case 52:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:297
 		{
 			yyVAL.strings = yyDollar[2].strings
 		}
-	case 51:
+	case 53:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:299
 		{
 			yyVAL.strings = []string{}
 		}
-	case 52:
+	case 54:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:301
 		{
 			yylex.(*parser).unexpected("grouping opts", "\"(\"")
 			yyVAL.strings = nil
 		}
-	case 53:
+	case 55:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:307
 		{
 			yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val)
 		}
-	case 54:
+	case 56:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:309
 		{
 			yyVAL.strings = []string{yyDollar[1].item.Val}
 		}
-	case 55:
+	case 57:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:311
 		{
 			yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"")
 			yyVAL.strings = yyDollar[1].strings
 		}
-	case 56:
+	case 58:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:315
 		{
-			if !isLabel(yyDollar[1].item.Val) {
-				yylex.(*parser).unexpected("grouping opts", "label")
+			if !model.LabelName(yyDollar[1].item.Val).IsValid() {
+				yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", yyDollar[1].item.Val)
 			}
 			yyVAL.item = yyDollar[1].item
 		}
-	case 57:
+	case 59:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
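+			// The grouping label may be a quoted string: unquote it and validate the resulting name.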
+			unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val)
+			if !model.LabelName(unquoted).IsValid() {
+				yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", unquoted)
+			}
+			yyVAL.item = yyDollar[1].item
+			yyVAL.item.Pos++
+			yyVAL.item.Val = unquoted
+		}
+	case 60:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:322
 		{
 			yylex.(*parser).unexpected("grouping opts", "label")
 			yyVAL.item = Item{}
 		}
-	case 58:
+	case 61:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:330
 		{
-			fn, exist := getFunction(yyDollar[1].item.Val)
+			fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions)
 			if !exist {
 				yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "unknown function with name %q", yyDollar[1].item.Val)
 			}
+			if fn != nil && fn.Experimental && !EnableExperimentalFunctions {
+				yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "function %q is not enabled", yyDollar[1].item.Val)
+			}
 			yyVAL.node = &Call{
 				Func: fn,
 				Args: yyDollar[2].node.(Expressions),
-				PosRange: PositionRange{
+				PosRange: posrange.PositionRange{
 					Start: yyDollar[1].item.Pos,
 					End:   yylex.(*parser).lastClosing,
 				},
 			}
 		}
-	case 59:
+	case 62:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:347
 		{
 			yyVAL.node = yyDollar[2].node
 		}
-	case 60:
+	case 63:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:349
 		{
 			yyVAL.node = Expressions{}
 		}
-	case 61:
+	case 64:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:353
 		{
 			yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr))
 		}
-	case 62:
+	case 65:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:355
 		{
 			yyVAL.node = Expressions{yyDollar[1].node.(Expr)}
 		}
-	case 63:
+	case 66:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:357
 		{
 			yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args")
 			yyVAL.node = yyDollar[1].node
 		}
-	case 64:
+	case 67:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:368
 		{
 			yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)}
 		}
-	case 65:
+	case 68:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:376
 		{
-			yylex.(*parser).addOffset(yyDollar[1].node, yyDollar[3].duration)
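+			// The offset is parsed as a NumberLiteral holding seconds; convert it back to a time.Duration.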
+			numLit, _ := yyDollar[3].node.(*NumberLiteral)
+			dur := time.Duration(numLit.Val*1000) * time.Millisecond
+			yylex.(*parser).addOffset(yyDollar[1].node, dur)
 			yyVAL.node = yyDollar[1].node
 		}
-	case 66:
+	case 69:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		{
+			numLit, _ := yyDollar[4].node.(*NumberLiteral)
+			dur := time.Duration(numLit.Val*1000) * time.Millisecond
+			yylex.(*parser).addOffset(yyDollar[1].node, -dur)
+			yyVAL.node = yyDollar[1].node
+		}
+	case 70:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:381
 		{
-			yylex.(*parser).unexpected("offset", "duration")
+			yylex.(*parser).unexpected("offset", "number or duration")
 			yyVAL.node = yyDollar[1].node
 		}
-	case 67:
+	case 71:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
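+			// Set the @ modifier timestamp on the preceding expression.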
+			yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float)
+			yyVAL.node = yyDollar[1].node
+		}
+	case 72:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		{
+			yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item)
+			yyVAL.node = yyDollar[1].node
+		}
+	case 73:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yylex.(*parser).unexpected("@", "timestamp")
+			yyVAL.node = yyDollar[1].node
+		}
+	case 76:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:389
 		{
 			var errMsg string
 			vs, ok := yyDollar[1].node.(*VectorSelector)
 			if !ok {
 				errMsg = "ranges only allowed for vector selectors"
-			} else if vs.Offset != 0 {
+			} else if vs.OriginalOffset != 0 {
 				errMsg = "no offset modifiers allowed before range"
+			} else if vs.Timestamp != nil {
+				errMsg = "no @ modifiers allowed before range"
 			}
 
 			if errMsg != "" {
 				errRange := mergeRanges(&yyDollar[2].item, &yyDollar[4].item)
-				yylex.(*parser).addParseErrf(errRange, errMsg)
+				yylex.(*parser).addParseErrf(errRange, "%s", errMsg)
 			}
 
+			numLit, _ := yyDollar[3].node.(*NumberLiteral)
 			yyVAL.node = &MatrixSelector{
 				VectorSelector: yyDollar[1].node.(Expr),
-				Range:          yyDollar[3].duration,
+				Range:          time.Duration(numLit.Val*1000) * time.Millisecond,
 				EndPos:         yylex.(*parser).lastClosing,
 			}
 		}
-	case 68:
+	case 77:
 		yyDollar = yyS[yypt-6 : yypt+1]
-//line generated_parser.y:412
 		{
+			numLitRange, _ := yyDollar[3].node.(*NumberLiteral)
+			numLitStep, _ := yyDollar[5].node.(*NumberLiteral)
 			yyVAL.node = &SubqueryExpr{
-				Expr:  yyDollar[1].node.(Expr),
-				Range: yyDollar[3].duration,
-				Step:  yyDollar[5].duration,
-
+				Expr:   yyDollar[1].node.(Expr),
+				Range:  time.Duration(numLitRange.Val*1000) * time.Millisecond,
+				Step:   time.Duration(numLitStep.Val*1000) * time.Millisecond,
 				EndPos: yyDollar[6].item.Pos + 1,
 			}
 		}
-	case 69:
+	case 78:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		{
+			numLitRange, _ := yyDollar[3].node.(*NumberLiteral)
+			yyVAL.node = &SubqueryExpr{
+				Expr:   yyDollar[1].node.(Expr),
+				Range:  time.Duration(numLitRange.Val*1000) * time.Millisecond,
+				Step:   0,
+				EndPos: yyDollar[5].item.Pos + 1,
+			}
+		}
+	case 79:
 		yyDollar = yyS[yypt-6 : yypt+1]
-//line generated_parser.y:422
 		{
 			yylex.(*parser).unexpected("subquery selector", "\"]\"")
 			yyVAL.node = yyDollar[1].node
 		}
-	case 70:
+	case 80:
 		yyDollar = yyS[yypt-5 : yypt+1]
-//line generated_parser.y:424
 		{
-			yylex.(*parser).unexpected("subquery selector", "duration or \"]\"")
+			yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\"")
 			yyVAL.node = yyDollar[1].node
 		}
-	case 71:
+	case 81:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:426
 		{
 			yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"")
 			yyVAL.node = yyDollar[1].node
 		}
-	case 72:
+	case 82:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:428
 		{
-			yylex.(*parser).unexpected("subquery selector", "duration")
+			yylex.(*parser).unexpected("subquery selector", "number or duration")
 			yyVAL.node = yyDollar[1].node
 		}
-	case 73:
+	case 83:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:438
 		{
 			if nl, ok := yyDollar[2].node.(*NumberLiteral); ok {
 				if yyDollar[1].item.Typ == SUB {
@@ -1305,9 +1455,8 @@ yydefault:
 				yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos}
 			}
 		}
-	case 74:
+	case 84:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:456
 		{
 			vs := yyDollar[2].node.(*VectorSelector)
 			vs.PosRange = mergeRanges(&yyDollar[1].item, vs)
@@ -1315,9 +1464,8 @@ yydefault:
 			yylex.(*parser).assembleVectorSelector(vs)
 			yyVAL.node = vs
 		}
-	case 75:
+	case 85:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:464
 		{
 			vs := &VectorSelector{
 				Name:          yyDollar[1].item.Val,
@@ -1327,44 +1475,39 @@ yydefault:
 			yylex.(*parser).assembleVectorSelector(vs)
 			yyVAL.node = vs
 		}
-	case 76:
+	case 86:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:474
 		{
 			vs := yyDollar[1].node.(*VectorSelector)
 			yylex.(*parser).assembleVectorSelector(vs)
 			yyVAL.node = vs
 		}
-	case 77:
+	case 87:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:482
 		{
 			yyVAL.node = &VectorSelector{
 				LabelMatchers: yyDollar[2].matchers,
 				PosRange:      mergeRanges(&yyDollar[1].item, &yyDollar[3].item),
 			}
 		}
-	case 78:
+	case 88:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:489
 		{
 			yyVAL.node = &VectorSelector{
 				LabelMatchers: yyDollar[2].matchers,
 				PosRange:      mergeRanges(&yyDollar[1].item, &yyDollar[4].item),
 			}
 		}
-	case 79:
+	case 89:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:496
 		{
 			yyVAL.node = &VectorSelector{
 				LabelMatchers: []*labels.Matcher{},
 				PosRange:      mergeRanges(&yyDollar[1].item, &yyDollar[2].item),
 			}
 		}
-	case 80:
+	case 90:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:505
 		{
 			if yyDollar[1].matchers != nil {
 				yyVAL.matchers = append(yyDollar[1].matchers, yyDollar[3].matcher)
@@ -1372,242 +1515,423 @@ yydefault:
 				yyVAL.matchers = yyDollar[1].matchers
 			}
 		}
-	case 81:
+	case 91:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:513
 		{
 			yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher}
 		}
-	case 82:
+	case 92:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:515
 		{
 			yylex.(*parser).unexpected("label matching", "\",\" or \"}\"")
 			yyVAL.matchers = yyDollar[1].matchers
 		}
-	case 83:
+	case 93:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:519
 		{
 			yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
 		}
-	case 84:
+	case 94:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
+		}
+	case 95:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
+			yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item)
+		}
+	case 96:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:521
 		{
 			yylex.(*parser).unexpected("label matching", "string")
 			yyVAL.matcher = nil
 		}
-	case 85:
+	case 97:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yylex.(*parser).unexpected("label matching", "string")
+			yyVAL.matcher = nil
+		}
+	case 98:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:523
 		{
 			yylex.(*parser).unexpected("label matching", "label matching operator")
 			yyVAL.matcher = nil
 		}
-	case 86:
+	case 99:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:525
 		{
 			yylex.(*parser).unexpected("label matching", "identifier or \"}\"")
 			yyVAL.matcher = nil
 		}
-	case 87:
+	case 100:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:533
 		{
-			yyVAL.labels = append(yyDollar[2].labels, labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val})
-			sort.Sort(yyVAL.labels)
+			b := labels.NewBuilder(yyDollar[2].labels)
+			b.Set(labels.MetricName, yyDollar[1].item.Val)
+			yyVAL.labels = b.Labels()
 		}
-	case 88:
+	case 101:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:535
 		{
 			yyVAL.labels = yyDollar[1].labels
 		}
-	case 109:
+	case 126:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:542
 		{
-			yyVAL.labels = labels.New(yyDollar[2].labels...)
+			yyVAL.labels = labels.New(yyDollar[2].lblList...)
 		}
-	case 110:
+	case 127:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:544
 		{
-			yyVAL.labels = labels.New(yyDollar[2].labels...)
+			yyVAL.labels = labels.New(yyDollar[2].lblList...)
 		}
-	case 111:
+	case 128:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:546
 		{
 			yyVAL.labels = labels.New()
 		}
-	case 112:
+	case 129:
 		yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:548
 		{
 			yyVAL.labels = labels.New()
 		}
-	case 113:
+	case 130:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:552
 		{
-			yyVAL.labels = append(yyDollar[1].labels, yyDollar[3].label)
+			yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label)
 		}
-	case 114:
+	case 131:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:554
 		{
-			yyVAL.labels = []labels.Label{yyDollar[1].label}
+			yyVAL.lblList = []labels.Label{yyDollar[1].label}
 		}
-	case 115:
+	case 132:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:556
 		{
 			yylex.(*parser).unexpected("label set", "\",\" or \"}\"")
-			yyVAL.labels = yyDollar[1].labels
+			yyVAL.lblList = yyDollar[1].lblList
+		}
+	case 133:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
 		}
-	case 116:
+	case 134:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:561
 		{
 			yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
 		}
-	case 117:
+	case 135:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
+			yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val}
+		}
+	case 136:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:563
 		{
 			yylex.(*parser).unexpected("label set", "string")
 			yyVAL.label = labels.Label{}
 		}
-	case 118:
+	case 137:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yylex.(*parser).unexpected("label set", "string")
+			yyVAL.label = labels.Label{}
+		}
+	case 138:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:565
 		{
 			yylex.(*parser).unexpected("label set", "\"=\"")
 			yyVAL.label = labels.Label{}
 		}
-	case 119:
+	case 139:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:567
 		{
 			yylex.(*parser).unexpected("label set", "identifier or \"}\"")
 			yyVAL.label = labels.Label{}
 		}
-	case 120:
+	case 140:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:575
 		{
 			yylex.(*parser).generatedParserResult = &seriesDescription{
 				labels: yyDollar[1].labels,
 				values: yyDollar[2].series,
 			}
 		}
-	case 121:
+	case 141:
 		yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:584
 		{
 			yyVAL.series = []SequenceValue{}
 		}
-	case 122:
+	case 142:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:586
 		{
 			yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...)
 		}
-	case 123:
+	case 143:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:588
 		{
 			yyVAL.series = yyDollar[1].series
 		}
-	case 124:
+	case 144:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:590
 		{
 			yylex.(*parser).unexpected("series values", "")
 			yyVAL.series = nil
 		}
-	case 125:
+	case 145:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:594
 		{
 			yyVAL.series = []SequenceValue{{Omitted: true}}
 		}
-	case 126:
+	case 146:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:596
 		{
 			yyVAL.series = []SequenceValue{}
 			for i := uint64(0); i < yyDollar[3].uint; i++ {
 				yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true})
 			}
 		}
-	case 127:
+	case 147:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:603
 		{
 			yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}}
 		}
-	case 128:
+	case 148:
 		yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:605
 		{
 			yyVAL.series = []SequenceValue{}
+			// Add an additional value for time 0, which we ignore in tests.
 			for i := uint64(0); i <= yyDollar[3].uint; i++ {
 				yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float})
 			}
 		}
-	case 129:
+	case 149:
 		yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:612
 		{
 			yyVAL.series = []SequenceValue{}
+			// Add an additional value for time 0, which we ignore in tests.
 			for i := uint64(0); i <= yyDollar[4].uint; i++ {
 				yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float})
 				yyDollar[1].float += yyDollar[2].float
 			}
 		}
-	case 130:
+	case 150:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
+			yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}}
+		}
+	case 151:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.series = []SequenceValue{}
+			// Add an additional value for time 0, which we ignore in tests.
+			for i := uint64(0); i <= yyDollar[3].uint; i++ {
+				yyVAL.series = append(yyVAL.series, SequenceValue{Histogram: yyDollar[1].histogram})
+				// $1 += $2
+			}
+		}
+	case 152:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		{
+			val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
+			if err != nil {
+				yylex.(*parser).addSemanticError(err)
+			}
+			yyVAL.series = val
+		}
+	case 153:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		{
+			val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
+			if err != nil {
+				yylex.(*parser).addSemanticError(err)
+			}
+			yyVAL.series = val
+		}
+	case 154:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:622
 		{
 			if yyDollar[1].item.Val != "stale" {
 				yylex.(*parser).unexpected("series values", "number or \"stale\"")
 			}
 			yyVAL.float = math.Float64frombits(value.StaleNaN)
 		}
+	case 157:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		{
+			yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
+		}
+	case 158:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
+		}
+	case 159:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			m := yylex.(*parser).newMap()
+			yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
+		}
+	case 160:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		{
+			m := yylex.(*parser).newMap()
+			yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
+		}
+	case 161:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors))
+		}
+	case 162:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
+			yyVAL.descriptors = yyDollar[1].descriptors
+		}
+	case 163:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		{
+			yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
+		}
+	case 164:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["schema"] = yyDollar[3].int
+		}
+	case 165:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["sum"] = yyDollar[3].float
+		}
+	case 166:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["count"] = yyDollar[3].float
+		}
+	case 167:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["z_bucket"] = yyDollar[3].float
+		}
+	case 168:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float
+		}
+	case 169:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
+		}
+	case 170:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
+		}
+	case 171:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["offset"] = yyDollar[3].int
+		}
+	case 172:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
+		}
+	case 173:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["n_offset"] = yyDollar[3].int
+		}
+	case 174:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.descriptors = yylex.(*parser).newMap()
+			yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item
+		}
 	case 175:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		{
+			yyVAL.bucket_set = yyDollar[2].bucket_set
+		}
+	case 176:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.bucket_set = yyDollar[2].bucket_set
+		}
+	case 177:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		{
+			yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
+		}
+	case 178:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
+			yyVAL.bucket_set = []float64{yyDollar[1].float}
+		}
+	case 233:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:653
 		{
 			yyVAL.node = &NumberLiteral{
 				Val:      yylex.(*parser).number(yyDollar[1].item.Val),
 				PosRange: yyDollar[1].item.PositionRange(),
 			}
 		}
-	case 176:
+	case 234:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
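+			// A DURATION token is represented as a NumberLiteral holding the duration in seconds.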
+			var err error
+			var dur time.Duration
+			dur, err = parseDuration(yyDollar[1].item.Val)
+			if err != nil {
+				yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
+			}
+			yyVAL.node = &NumberLiteral{
+				Val:      dur.Seconds(),
+				PosRange: yyDollar[1].item.PositionRange(),
+			}
+		}
+	case 235:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:661
 		{
 			yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
 		}
-	case 177:
+	case 236:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		{
+			var err error
+			var dur time.Duration
+			dur, err = parseDuration(yyDollar[1].item.Val)
+			if err != nil {
+				yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
+			}
+			yyVAL.float = dur.Seconds()
+		}
+	case 237:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:663
 		{
 			yyVAL.float = yyDollar[2].float
 		}
-	case 178:
+	case 238:
 		yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:664
 		{
 			yyVAL.float = -yyDollar[2].float
 		}
-	case 179:
+	case 241:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:668
 		{
 			var err error
 			yyVAL.uint, err = strconv.ParseUint(yyDollar[1].item.Val, 10, 64)
@@ -1615,34 +1939,35 @@ yydefault:
 				yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
 			}
 		}
-	case 180:
+	case 242:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		{
+			yyVAL.int = -int64(yyDollar[2].uint)
+		}
+	case 243:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:678
 		{
-			var err error
-			yyVAL.duration, err = parseDuration(yyDollar[1].item.Val)
-			if err != nil {
-				yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
-			}
+			yyVAL.int = int64(yyDollar[1].uint)
 		}
-	case 181:
+	case 244:
 		yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:689
 		{
 			yyVAL.node = &StringLiteral{
 				Val:      yylex.(*parser).unquoteString(yyDollar[1].item.Val),
 				PosRange: yyDollar[1].item.PositionRange(),
 			}
 		}
-	case 182:
-		yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:702
+	case 245:
+		yyDollar = yyS[yypt-1 : yypt+1]
 		{
-			yyVAL.duration = 0
+			yyVAL.item = Item{
+				Typ: METRIC_IDENTIFIER,
+				Pos: yyDollar[1].item.PositionRange().Start,
+				Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
+			}
 		}
-	case 184:
+	case 246:
 		yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:706
 		{
 			yyVAL.strings = nil
 		}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
index ada5d70d1612ad41a7cf0e3eef426fcf5c673682..52658f318c51f62f90a60344e30a576416743d20 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
@@ -18,13 +18,15 @@ import (
 	"strings"
 	"unicode"
 	"unicode/utf8"
+
+	"github.com/prometheus/prometheus/promql/parser/posrange"
 )
 
 // Item represents a token or text string returned from the scanner.
 type Item struct {
-	Typ ItemType // The type of this Item.
-	Pos Pos      // The starting position, in bytes, of this Item in the input string.
-	Val string   // The value of this Item.
+	Typ ItemType     // The type of this Item.
+	Pos posrange.Pos // The starting position, in bytes, of this Item in the input string.
+	Val string       // The value of this Item.
 }
 
 // String returns a descriptive string for the Item.
@@ -48,18 +50,28 @@ func (i Item) String() string {
 	return fmt.Sprintf("%q", i.Val)
 }
 
+// Pretty returns the prettified form of an item.
+// This is the same as the item's stringified format.
+func (i Item) Pretty(int) string { return i.String() }
+
 // IsOperator returns true if the Item corresponds to a arithmetic or set operator.
 // Returns false otherwise.
 func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd }
 
 // IsAggregator returns true if the Item belongs to the aggregator functions.
-// Returns false otherwise
+// Returns false otherwise.
 func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }
 
 // IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
-// Returns false otherwise
+// Returns false otherwise.
 func (i ItemType) IsAggregatorWithParam() bool {
-	return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
+	return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE || i == LIMITK || i == LIMIT_RATIO
+}
+
+// IsExperimentalAggregator returns true if the Item is an experimental aggregator
+// that is gated behind EnableExperimentalFunctions.
+func (i ItemType) IsExperimentalAggregator() bool {
+	return i == LIMITK || i == LIMIT_RATIO
 }
 
 // IsKeyword returns true if the Item corresponds to a keyword.
@@ -97,6 +109,7 @@ var key = map[string]ItemType{
 	"and":    LAND,
 	"or":     LOR,
 	"unless": LUNLESS,
+	"atan2":  ATAN2,
 
 	// Aggregators.
 	"sum":          SUM,
@@ -111,6 +124,8 @@ var key = map[string]ItemType{
 	"bottomk":      BOTTOMK,
 	"count_values": COUNT_VALUES,
 	"quantile":     QUANTILE,
+	"limitk":       LIMITK,
+	"limit_ratio":  LIMIT_RATIO,
 
 	// Keywords.
 	"offset":      OFFSET,
@@ -121,11 +136,38 @@ var key = map[string]ItemType{
 	"group_left":  GROUP_LEFT,
 	"group_right": GROUP_RIGHT,
 	"bool":        BOOL,
+
+	// Preprocessors.
+	"start": START,
+	"end":   END,
+}
+
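+// histogramDesc maps the histogram descriptor keywords used in series descriptions to their token types.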
+var histogramDesc = map[string]ItemType{
+	"sum":                SUM_DESC,
+	"count":              COUNT_DESC,
+	"schema":             SCHEMA_DESC,
+	"offset":             OFFSET_DESC,
+	"n_offset":           NEGATIVE_OFFSET_DESC,
+	"buckets":            BUCKETS_DESC,
+	"n_buckets":          NEGATIVE_BUCKETS_DESC,
+	"z_bucket":           ZERO_BUCKET_DESC,
+	"z_bucket_w":         ZERO_BUCKET_WIDTH_DESC,
+	"custom_values":      CUSTOM_VALUES_DESC,
+	"counter_reset_hint": COUNTER_RESET_HINT_DESC,
+}
+
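+// counterResetHints maps counter reset hint keywords to their token types.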
+var counterResetHints = map[string]ItemType{
+	"unknown":   UNKNOWN_COUNTER_RESET,
+	"reset":     COUNTER_RESET,
+	"not_reset": NOT_COUNTER_RESET,
+	"gauge":     GAUGE_TYPE,
 }
 
 // ItemTypeStr is the default string representations for common Items. It does not
 // imply that those are the only character sequences that can be lexed to such an Item.
 var ItemTypeStr = map[ItemType]string{
+	OPEN_HIST:     "{{",
+	CLOSE_HIST:    "}}",
 	LEFT_PAREN:    "(",
 	RIGHT_PAREN:   ")",
 	LEFT_BRACE:    "{",
@@ -210,20 +252,26 @@ const eof = -1
 // stateFn represents the state of the scanner as a function that returns the next state.
 type stateFn func(*Lexer) stateFn
 
-// Pos is the position in a string.
-// Negative numbers indicate undefined positions.
-type Pos int
+type histogramState int
+
+const (
+	histogramStateNone histogramState = iota
+	histogramStateOpen
+	histogramStateMul
+	histogramStateAdd
+	histogramStateSub
+)
 
 // Lexer holds the state of the scanner.
 type Lexer struct {
-	input       string  // The string being scanned.
-	state       stateFn // The next lexing function to enter.
-	pos         Pos     // Current position in the input.
-	start       Pos     // Start position of this Item.
-	width       Pos     // Width of last rune read from input.
-	lastPos     Pos     // Position of most recent Item returned by NextItem.
-	itemp       *Item   // Pointer to where the next scanned item should be placed.
-	scannedItem bool    // Set to true every time an item is scanned.
+	input       string       // The string being scanned.
+	state       stateFn      // The next lexing function to enter.
+	pos         posrange.Pos // Current position in the input.
+	start       posrange.Pos // Start position of this Item.
+	width       posrange.Pos // Width of last rune read from input.
+	lastPos     posrange.Pos // Position of most recent Item returned by NextItem.
+	itemp       *Item        // Pointer to where the next scanned item should be placed.
+	scannedItem bool         // Set to true every time an item is scanned.
 
 	parenDepth  int  // Nesting depth of ( ) exprs.
 	braceOpen   bool // Whether a { is opened.
@@ -231,9 +279,10 @@ type Lexer struct {
 	gotColon    bool // Whether we got a ':' after [ was opened.
 	stringOpen  rune // Quote rune of the string currently being read.
 
-	// seriesDesc is set when a series description for the testing
-	// language is lexed.
-	seriesDesc bool
+	// Series description variables, used by the internal PromQL testing framework as well as by promtool rules unit tests.
+	// See https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
+	seriesDesc     bool           // Whether we are lexing a series description.
+	histogramState histogramState // Tracks whether we are inside a histogram description.
 }
 
 // next returns the next rune in the input.
@@ -243,7 +292,7 @@ func (l *Lexer) next() rune {
 		return eof
 	}
 	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
-	l.width = Pos(w)
+	l.width = posrange.Pos(w)
 	l.pos += l.width
 	return r
 }
@@ -281,10 +330,15 @@ func (l *Lexer) accept(valid string) bool {
 	return false
 }
 
+// is peeks and returns true if the next rune is contained in the provided string.
+func (l *Lexer) is(valid string) bool {
+	return strings.ContainsRune(valid, l.peek())
+}
+
 // acceptRun consumes a run of runes from the valid set.
 func (l *Lexer) acceptRun(valid string) {
 	for strings.ContainsRune(valid, l.next()) {
-		// consume
+		// Consume.
 	}
 	l.backup()
 }
@@ -328,6 +382,9 @@ const lineComment = "#"
 
 // lexStatements is the top-level state for lexing.
 func lexStatements(l *Lexer) stateFn {
+	if l.histogramState != histogramStateNone {
+		return lexHistogram
+	}
 	if l.braceOpen {
 		return lexInsideBraces
 	}
@@ -337,9 +394,10 @@ func lexStatements(l *Lexer) stateFn {
 
 	switch r := l.next(); {
 	case r == eof:
-		if l.parenDepth != 0 {
+		switch {
+		case l.parenDepth != 0:
 			return l.errorf("unclosed left parenthesis")
-		} else if l.bracketOpen {
+		case l.bracketOpen:
 			return l.errorf("unclosed left bracket")
 		}
 		l.emit(EOF)
@@ -361,20 +419,20 @@ func lexStatements(l *Lexer) stateFn {
 	case r == '^':
 		l.emit(POW)
 	case r == '=':
-		if t := l.peek(); t == '=' {
+		switch t := l.peek(); t {
+		case '=':
 			l.next()
 			l.emit(EQLC)
-		} else if t == '~' {
+		case '~':
 			return l.errorf("unexpected character after '=': %q", t)
-		} else {
+		default:
 			l.emit(EQL)
 		}
 	case r == '!':
-		if t := l.next(); t == '=' {
-			l.emit(NEQ)
-		} else {
+		if t := l.next(); t != '=' {
 			return l.errorf("unexpected character after '!': %q", t)
 		}
+		l.emit(NEQ)
 	case r == '<':
 		if t := l.peek(); t == '=' {
 			l.next()
@@ -433,20 +491,155 @@ func lexStatements(l *Lexer) stateFn {
 			skipSpaces(l)
 		}
 		l.bracketOpen = true
-		return lexDuration
+		return lexNumberOrDuration
 	case r == ']':
 		if !l.bracketOpen {
 			return l.errorf("unexpected right bracket %q", r)
 		}
 		l.emit(RIGHT_BRACKET)
 		l.bracketOpen = false
-
+	case r == '@':
+		l.emit(AT)
 	default:
 		return l.errorf("unexpected character: %q", r)
 	}
 	return lexStatements
 }
 
+func lexHistogram(l *Lexer) stateFn {
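+	// Scan a native histogram description, i.e. the {{...}} syntax used in series descriptions.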
+	switch l.histogramState {
+	case histogramStateMul:
+		l.histogramState = histogramStateNone
+		l.next()
+		l.emit(TIMES)
+		return lexNumber
+	case histogramStateAdd:
+		l.histogramState = histogramStateNone
+		l.next()
+		l.emit(ADD)
+		return lexValueSequence
+	case histogramStateSub:
+		l.histogramState = histogramStateNone
+		l.next()
+		l.emit(SUB)
+		return lexValueSequence
+	}
+
+	if l.bracketOpen {
+		return lexBuckets
+	}
+	switch r := l.next(); {
+	case isSpace(r):
+		l.emit(SPACE)
+		return lexSpace
+	case isAlpha(r):
+		l.backup()
+		return lexHistogramDescriptor
+	case r == ':':
+		l.emit(COLON)
+		return lexHistogram
+	case r == '-':
+		l.emit(SUB)
+		return lexHistogram
+	case r == 'x':
+		l.emit(TIMES)
+		return lexNumber
+	case isDigit(r):
+		l.backup()
+		return lexNumber
+	case r == '[':
+		l.bracketOpen = true
+		l.emit(LEFT_BRACKET)
+		return lexBuckets
+	case r == '}' && l.peek() == '}':
+		l.next()
+		l.emit(CLOSE_HIST)
+		switch l.peek() {
+		case 'x':
+			l.histogramState = histogramStateMul
+			return lexHistogram
+		case '+':
+			l.histogramState = histogramStateAdd
+			return lexHistogram
+		case '-':
+			l.histogramState = histogramStateSub
+			return lexHistogram
+		default:
+			l.histogramState = histogramStateNone
+			return lexValueSequence
+		}
+	default:
+		return l.errorf("histogram description incomplete unexpected: %q", r)
+	}
+}
+
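+// lexHistogramDescriptor scans a histogram descriptor keyword, a counter reset hint, or a special value such as Inf or NaN.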
+func lexHistogramDescriptor(l *Lexer) stateFn {
+Loop:
+	for {
+		switch r := l.next(); {
+		case isAlpha(r):
+			// absorb.
+		default:
+			l.backup()
+
+			word := l.input[l.start:l.pos]
+			if desc, ok := histogramDesc[strings.ToLower(word)]; ok {
+				if l.peek() == ':' {
+					l.emit(desc)
+					return lexHistogram
+				}
+				l.errorf("missing `:` for histogram descriptor")
+				break Loop
+			}
+			// Current word is Inf or NaN.
+			if desc, ok := key[strings.ToLower(word)]; ok {
+				if desc == NUMBER {
+					l.emit(desc)
+					return lexHistogram
+				}
+			}
+			if desc, ok := counterResetHints[strings.ToLower(word)]; ok {
+				l.emit(desc)
+				return lexHistogram
+			}
+
+			l.errorf("bad histogram descriptor found: %q", word)
+			break Loop
+		}
+	}
+	return lexStatements
+}
+
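+// lexBuckets scans a bucket list such as [5 10 7] within a histogram description.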
+func lexBuckets(l *Lexer) stateFn {
+	switch r := l.next(); {
+	case isSpace(r):
+		l.emit(SPACE)
+		return lexSpace
+	case r == '-':
+		l.emit(SUB)
+		return lexNumber
+	case isDigit(r):
+		l.backup()
+		return lexNumber
+	case r == ']':
+		l.bracketOpen = false
+		l.emit(RIGHT_BRACKET)
+		return lexHistogram
+	case isAlpha(r):
+		// Current word is Inf or NaN.
+		word := l.input[l.start:l.pos]
+		if desc, ok := key[strings.ToLower(word)]; ok {
+			if desc == NUMBER {
+				l.emit(desc)
+				return lexStatements
+			}
+		}
+		return lexBuckets
+	default:
+		return l.errorf("invalid character in buckets description: %q", r)
+	}
+}
+
 // lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
 // scanned as identifiers.
 func lexInsideBraces(l *Lexer) stateFn {
@@ -504,9 +697,20 @@ func lexInsideBraces(l *Lexer) stateFn {
 
 // lexValueSequence scans a value sequence of a series description.
 func lexValueSequence(l *Lexer) stateFn {
+	if l.histogramState != histogramStateNone {
+		return lexHistogram
+	}
 	switch r := l.next(); {
 	case r == eof:
 		return lexStatements
+	case r == '{' && l.peek() == '{':
+		if l.histogramState != histogramStateNone {
+			return l.errorf("unexpected histogram opening {{")
+		}
+		l.histogramState = histogramStateOpen
+		l.next()
+		l.emit(OPEN_HIST)
+		return lexHistogram
 	case isSpace(r):
 		l.emit(SPACE)
 		lexSpace(l)
@@ -541,23 +745,23 @@ func lexValueSequence(l *Lexer) stateFn {
 // was only modified to integrate with our lexer.
 func lexEscape(l *Lexer) stateFn {
 	var n int
-	var base, max uint32
+	var base, maxVal uint32
 
 	ch := l.next()
 	switch ch {
 	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
 		return lexString
 	case '0', '1', '2', '3', '4', '5', '6', '7':
-		n, base, max = 3, 8, 255
+		n, base, maxVal = 3, 8, 255
 	case 'x':
 		ch = l.next()
-		n, base, max = 2, 16, 255
+		n, base, maxVal = 2, 16, 255
 	case 'u':
 		ch = l.next()
-		n, base, max = 4, 16, unicode.MaxRune
+		n, base, maxVal = 4, 16, unicode.MaxRune
 	case 'U':
 		ch = l.next()
-		n, base, max = 8, 16, unicode.MaxRune
+		n, base, maxVal = 8, 16, unicode.MaxRune
 	case eof:
 		l.errorf("escape sequence not terminated")
 		return lexString
@@ -578,11 +782,15 @@ func lexEscape(l *Lexer) stateFn {
 			return lexString
 		}
 		x = x*base + d
-		ch = l.next()
 		n--
+
+		// Don't seek after last rune.
+		if n > 0 {
+			ch = l.next()
+		}
 	}
 
-	if x > max || 0xD800 <= x && x < 0xE000 {
+	if x > maxVal || 0xD800 <= x && x < 0xE000 {
 		l.errorf("escape sequence is an invalid Unicode code point")
 	}
 	return lexString
@@ -660,7 +868,7 @@ func lexSpace(l *Lexer) stateFn {
 
 // lexLineComment scans a line comment. Left comment marker is known to be present.
 func lexLineComment(l *Lexer) stateFn {
-	l.pos += Pos(len(lineComment))
+	l.pos += posrange.Pos(len(lineComment))
 	for r := l.next(); !isEndOfLine(r) && r != eof; {
 		r = l.next()
 	}
@@ -669,18 +877,6 @@ func lexLineComment(l *Lexer) stateFn {
 	return lexStatements
 }
 
-func lexDuration(l *Lexer) stateFn {
-	if l.scanNumber() {
-		return l.errorf("missing unit character in duration")
-	}
-	if !acceptRemainingDuration(l) {
-		return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
-	}
-	l.backup()
-	l.emit(DURATION)
-	return lexStatements
-}
-
 // lexNumber scans a number: decimal, hex, oct or float.
 func lexNumber(l *Lexer) stateFn {
 	if !l.scanNumber() {
@@ -732,18 +928,81 @@ func acceptRemainingDuration(l *Lexer) bool {
 // scanNumber scans numbers of different formats. The scanned Item is
 // not necessarily a valid number. This case is caught by the parser.
 func (l *Lexer) scanNumber() bool {
-	digits := "0123456789"
+	initialPos := l.pos
+	// Modify the digit pattern if the number is hexadecimal.
+	digitPattern := "0123456789"
 	// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
-	if !l.seriesDesc && l.accept("0") && l.accept("xX") {
-		digits = "0123456789abcdefABCDEF"
+	if !l.seriesDesc &&
+		l.accept("0") && l.accept("xX") {
+		l.accept("_") // e.g., 0X_1FFFP-16 == 0.1249847412109375
+		digitPattern = "0123456789abcdefABCDEF"
 	}
-	l.acceptRun(digits)
-	if l.accept(".") {
-		l.acceptRun(digits)
+	const (
+		// Define dot, exponent, and underscore patterns.
+		dotPattern        = "."
+		exponentPattern   = "eE"
+		underscorePattern = "_"
+		// Anti-patterns are rune sets that cannot follow their respective rune.
+		dotAntiPattern        = "_."
+		exponentAntiPattern   = "._eE" // and EOL.
+		underscoreAntiPattern = "._eE" // and EOL.
+	)
+	// All numbers follow the prefix: [.][d][d._eE]*
+	l.accept(dotPattern)
+	l.accept(digitPattern)
+	// [d._eE]* hereon.
+	dotConsumed := false
+	exponentConsumed := false
+	for l.is(digitPattern + dotPattern + underscorePattern + exponentPattern) {
+		// "." cannot repeat.
+		if l.is(dotPattern) {
+			if dotConsumed {
+				l.accept(dotPattern)
+				return false
+			}
+		}
+		// "eE" cannot repeat.
+		if l.is(exponentPattern) {
+			if exponentConsumed {
+				l.accept(exponentPattern)
+				return false
+			}
+		}
+		// Handle dots.
+		if l.accept(dotPattern) {
+			dotConsumed = true
+			if l.accept(dotAntiPattern) {
+				return false
+			}
+			// Fractional hexadecimal literals are not allowed.
+			if len(digitPattern) > 10 /* 0x[\da-fA-F].[\d]+p[\d] */ {
+				return false
+			}
+			continue
+		}
+		// Handle exponents.
+		if l.accept(exponentPattern) {
+			exponentConsumed = true
+			l.accept("+-")
+			if l.accept(exponentAntiPattern) || l.peek() == eof {
+				return false
+			}
+			continue
+		}
+		// Handle underscores.
+		if l.accept(underscorePattern) {
+			if l.accept(underscoreAntiPattern) || l.peek() == eof {
+				return false
+			}
+
+			continue
+		}
+		// Handle digits at the end since we already consumed before this loop.
+		l.acceptRun(digitPattern)
 	}
-	if l.accept("eE") {
-		l.accept("+-")
-		l.acceptRun("0123456789")
+	// Empty string is not a valid number.
+	if l.pos == initialPos {
+		return false
 	}
 	// Next thing must not be alphanumeric unless it's the times token
 	// for series repetitions.
@@ -776,11 +1035,12 @@ Loop:
 		default:
 			l.backup()
 			word := l.input[l.start:l.pos]
-			if kw, ok := key[strings.ToLower(word)]; ok {
+			switch kw, ok := key[strings.ToLower(word)]; {
+			case ok:
 				l.emit(kw)
-			} else if !strings.Contains(word, ":") {
+			case !strings.Contains(word, ":"):
 				l.emit(IDENTIFIER)
-			} else {
+			default:
 				l.emit(METRIC_IDENTIFIER)
 			}
 			break Loop
@@ -817,16 +1077,3 @@ func isDigit(r rune) bool {
 func isAlpha(r rune) bool {
 	return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
 }
-
-// isLabel reports whether the string can be used as label.
-func isLabel(s string) bool {
-	if len(s) == 0 || !isAlpha(rune(s[0])) {
-		return false
-	}
-	for _, c := range s[1:] {
-		if !isAlphaNumeric(c) {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go
index 50d69b21d27016d1da49b50816596726fd7535d6..5ace332d7186de698799945f83efc9108e70d05a 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go
@@ -14,7 +14,9 @@
 package parser
 
 import (
+	"errors"
 	"fmt"
+	"math"
 	"os"
 	"runtime"
 	"strconv"
@@ -22,10 +24,12 @@ import (
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
 	"github.com/prometheus/prometheus/util/strutil"
 )
 
@@ -35,15 +39,23 @@ var parserPool = sync.Pool{
 	},
 }
 
+type Parser interface {
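+	// ParseExpr parses the input given at construction time into an expression.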
+	ParseExpr() (Expr, error)
+	Close()
+}
+
 type parser struct {
 	lex Lexer
 
 	inject    ItemType
 	injecting bool
 
+	// functions contains all functions supported by the parser instance.
+	functions map[string]*Function
+
 	// Everytime an Item is lexed that could be the end
 	// of certain expressions its end position is stored here.
-	lastClosing Pos
+	lastClosing posrange.Pos
 
 	yyParser yyParserImpl
 
@@ -51,9 +63,65 @@ type parser struct {
 	parseErrors           ParseErrors
 }
 
+type Opt func(p *parser)
+
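+// WithFunctions returns an Opt that replaces the parser's default function registry.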
+func WithFunctions(functions map[string]*Function) Opt {
+	return func(p *parser) {
+		p.functions = functions
+	}
+}
+
+// NewParser returns a new parser.
+func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexported-return
+	p := parserPool.Get().(*parser)
+
+	p.functions = Functions
+	p.injecting = false
+	p.parseErrors = nil
+	p.generatedParserResult = nil
+
+	// Clear lexer struct before reusing.
+	p.lex = Lexer{
+		input: input,
+		state: lexStatements,
+	}
+
+	// Apply user-defined options.
+	for _, opt := range opts {
+		opt(p)
+	}
+
+	return p
+}
+
+func (p *parser) ParseExpr() (expr Expr, err error) {
+	defer p.recover(&err)
+
+	parseResult := p.parseGenerated(START_EXPRESSION)
+
+	if parseResult != nil {
+		expr = parseResult.(Expr)
+	}
+
+	// Only typecheck when there are no syntax errors.
+	if len(p.parseErrors) == 0 {
+		p.checkAST(expr)
+	}
+
+	if len(p.parseErrors) != 0 {
+		err = p.parseErrors
+	}
+
+	return expr, err
+}
+
+func (p *parser) Close() {
+	defer parserPool.Put(p)
+}
+
 // ParseErr wraps a parsing error with line and position context.
 type ParseErr struct {
-	PositionRange PositionRange
+	PositionRange posrange.PositionRange
 	Err           error
 	Query         string
 
@@ -62,27 +130,7 @@ type ParseErr struct {
 }
 
 func (e *ParseErr) Error() string {
-	pos := int(e.PositionRange.Start)
-	lastLineBreak := -1
-	line := e.LineOffset + 1
-
-	var positionStr string
-
-	if pos < 0 || pos > len(e.Query) {
-		positionStr = "invalid position:"
-	} else {
-
-		for i, c := range e.Query[:pos] {
-			if c == '\n' {
-				lastLineBreak = i
-				line++
-			}
-		}
-
-		col := pos - lastLineBreak
-		positionStr = fmt.Sprintf("%d:%d:", line, col)
-	}
-	return fmt.Sprintf("%s parse error: %s", positionStr, e.Err)
+	return fmt.Sprintf("%s: parse error: %s", e.PositionRange.StartPosInput(e.Query, e.LineOffset), e.Err)
 }
 
 type ParseErrors []ParseErr
@@ -101,34 +149,32 @@ func (errs ParseErrors) Error() string {
 	return "error contains no error message"
 }
 
-// ParseExpr returns the expression parsed from the input.
-func ParseExpr(input string) (expr Expr, err error) {
-	p := newParser(input)
-	defer parserPool.Put(p)
-	defer p.recover(&err)
-
-	parseResult := p.parseGenerated(START_EXPRESSION)
-
-	if parseResult != nil {
-		expr = parseResult.(Expr)
+// EnrichParseError enriches a single parse error or a list of parse errors (used for unit tests and promtool).
+func EnrichParseError(err error, enrich func(parseErr *ParseErr)) {
+	var parseErr *ParseErr
+	if errors.As(err, &parseErr) {
+		enrich(parseErr)
 	}
-
-	// Only typecheck when there are no syntax errors.
-	if len(p.parseErrors) == 0 {
-		p.checkAST(expr)
-	}
-
-	if len(p.parseErrors) != 0 {
-		err = p.parseErrors
+	var parseErrors ParseErrors
+	if errors.As(err, &parseErrors) {
+		for i, e := range parseErrors {
+			enrich(&e)
+			parseErrors[i] = e
+		}
 	}
+}
 
-	return expr, err
+// ParseExpr returns the expression parsed from the input.
+func ParseExpr(input string) (expr Expr, err error) {
+	p := NewParser(input)
+	defer p.Close()
+	return p.ParseExpr()
 }
 
-// ParseMetric parses the input into a metric
+// ParseMetric parses the input into a metric.
 func ParseMetric(input string) (m labels.Labels, err error) {
-	p := newParser(input)
-	defer parserPool.Put(p)
+	p := NewParser(input)
+	defer p.Close()
 	defer p.recover(&err)
 
 	parseResult := p.parseGenerated(START_METRIC)
@@ -146,8 +192,8 @@ func ParseMetric(input string) (m labels.Labels, err error) {
 // ParseMetricSelector parses the provided textual metric selector into a list of
 // label matchers.
 func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
-	p := newParser(input)
-	defer parserPool.Put(p)
+	p := NewParser(input)
+	defer p.Close()
 	defer p.recover(&err)
 
 	parseResult := p.parseGenerated(START_METRIC_SELECTOR)
@@ -162,32 +208,34 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
 	return m, err
 }
 
-// newParser returns a new parser.
-func newParser(input string) *parser {
-	p := parserPool.Get().(*parser)
-
-	p.injecting = false
-	p.parseErrors = nil
-	p.generatedParserResult = nil
-
-	// Clear lexer struct before reusing.
-	p.lex = Lexer{
-		input: input,
-		state: lexStatements,
+// ParseMetricSelectors parses a list of provided textual metric selectors into lists of
+// label matchers.
+func ParseMetricSelectors(matchers []string) (m [][]*labels.Matcher, err error) {
+	var matcherSets [][]*labels.Matcher
+	for _, s := range matchers {
+		matchers, err := ParseMetricSelector(s)
+		if err != nil {
+			return nil, err
+		}
+		matcherSets = append(matcherSets, matchers)
 	}
-	return p
+	return matcherSets, nil
 }
 
 // SequenceValue is an omittable value in a sequence of time series values.
 type SequenceValue struct {
-	Value   float64
-	Omitted bool
+	Value     float64
+	Omitted   bool
+	Histogram *histogram.FloatHistogram
 }
 
 func (v SequenceValue) String() string {
 	if v.Omitted {
 		return "_"
 	}
+	if v.Histogram != nil {
+		return v.Histogram.String()
+	}
 	return fmt.Sprintf("%f", v.Value)
 }
 
@@ -196,12 +244,13 @@ type seriesDescription struct {
 	values []SequenceValue
 }
 
-// ParseSeriesDesc parses the description of a time series.
+// ParseSeriesDesc parses the description of a time series. It is only used in
+// the PromQL testing framework code.
 func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue, err error) {
-	p := newParser(input)
+	p := NewParser(input)
 	p.lex.seriesDesc = true
 
-	defer parserPool.Put(p)
+	defer p.Close()
 	defer p.recover(&err)
 
 	parseResult := p.parseGenerated(START_SERIES_DESCRIPTION)
@@ -210,7 +259,6 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue
 
 		labels = result.labels
 		values = result.values
-
 	}
 
 	if len(p.parseErrors) != 0 {
@@ -221,12 +269,12 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue
 }
 
 // addParseErrf formats the error and appends it to the list of parsing errors.
-func (p *parser) addParseErrf(positionRange PositionRange, format string, args ...interface{}) {
-	p.addParseErr(positionRange, errors.Errorf(format, args...))
+func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...interface{}) {
+	p.addParseErr(positionRange, fmt.Errorf(format, args...))
 }
 
 // addParseErr appends the provided error to the list of parsing errors.
-func (p *parser) addParseErr(positionRange PositionRange, err error) {
+func (p *parser) addParseErr(positionRange posrange.PositionRange, err error) {
 	perr := ParseErr{
 		PositionRange: positionRange,
 		Err:           err,
@@ -236,10 +284,14 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) {
 	p.parseErrors = append(p.parseErrors, perr)
 }
 
+func (p *parser) addSemanticError(err error) {
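+	// Semantic errors are reported at the position of the current lookahead item.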
+	p.addParseErr(p.yyParser.lval.item.PositionRange(), err)
+}
+
 // unexpected creates a parser error complaining about an unexpected lexer item.
 // The item that is presented as unexpected is always the last item produced
 // by the lexer.
-func (p *parser) unexpected(context string, expected string) {
+func (p *parser) unexpected(context, expected string) {
 	var errMsg strings.Builder
 
 	// Do not report lexer errors twice
@@ -268,14 +320,15 @@ var errUnexpected = errors.New("unexpected error")
 // recover is the handler that turns panics into returns from the top level of Parse.
 func (p *parser) recover(errp *error) {
 	e := recover()
-	if _, ok := e.(runtime.Error); ok {
+	switch _, ok := e.(runtime.Error); {
+	case ok:
 		// Print the stack trace but do not inhibit the running application.
 		buf := make([]byte, 64<<10)
 		buf = buf[:runtime.Stack(buf, false)]
 
 		fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
 		*errp = errUnexpected
-	} else if e != nil {
+	case e != nil:
 		*errp = e.(error)
 	}
 }
@@ -288,7 +341,7 @@ func (p *parser) recover(errp *error) {
 // the generated and non-generated parts to work together with regards to lookahead
 // and error handling.
 //
-// For more information, see https://godoc.org/golang.org/x/tools/cmd/goyacc.
+// For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc.
 func (p *parser) Lex(lval *yySymType) int {
 	var typ ItemType
 
@@ -307,9 +360,9 @@ func (p *parser) Lex(lval *yySymType) int {
 
 	switch typ {
 	case ERROR:
-		pos := PositionRange{
+		pos := posrange.PositionRange{
 			Start: p.lex.start,
-			End:   Pos(len(p.lex.input)),
+			End:   posrange.Pos(len(p.lex.input)),
 		}
 		p.addParseErr(pos, errors.New(p.yyParser.lval.item.Val))
 
@@ -318,8 +371,8 @@ func (p *parser) Lex(lval *yySymType) int {
 	case EOF:
 		lval.item.Typ = EOF
 		p.InjectItem(0)
-	case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION:
-		p.lastClosing = lval.item.Pos + Pos(len(lval.item.Val))
+	case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION, NUMBER:
+		p.lastClosing = lval.item.Pos + posrange.Pos(len(lval.item.Val))
 	}
 
 	return int(typ)
@@ -329,8 +382,8 @@ func (p *parser) Lex(lval *yySymType) int {
 //
 // It is a no-op since the parsers error routines are triggered
 // by mechanisms that allow more fine-grained control
-// For more information, see https://godoc.org/golang.org/x/tools/cmd/goyacc.
-func (p *parser) Error(e string) {
+// For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc.
+func (p *parser) Error(string) {
 }
 
 // InjectItem allows injecting a single Item at the beginning of the token stream
@@ -352,7 +405,8 @@ func (p *parser) InjectItem(typ ItemType) {
 	p.inject = typ
 	p.injecting = true
 }
-func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers Node, rhs Node) *BinaryExpr {
+
+func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr {
 	ret := modifiers.(*BinaryExpr)
 
 	ret.LHS = lhs.(Expr)
@@ -363,6 +417,8 @@ func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers Node, rhs Node
 }
 
 func (p *parser) assembleVectorSelector(vs *VectorSelector) {
+	// If the metric name was set outside the braces, add a matcher for it.
+	// If the metric name was inside the braces, we don't need to do anything.
 	if vs.Name != "" {
 		nameMatcher, err := labels.NewMatcher(labels.MatchEqual, labels.MetricName, vs.Name)
 		if err != nil {
@@ -372,11 +428,11 @@ func (p *parser) assembleVectorSelector(vs *VectorSelector) {
 	}
 }
 
-func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *AggregateExpr) {
+func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) {
 	ret = modifier.(*AggregateExpr)
 	arguments := args.(Expressions)
 
-	ret.PosRange = PositionRange{
+	ret.PosRange = posrange.PositionRange{
 		Start: op.Pos,
 		End:   p.lastClosing,
 	}
@@ -392,6 +448,10 @@ func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *Aggre
 
 	desiredArgs := 1
 	if ret.Op.IsAggregatorWithParam() {
+		if !EnableExperimentalFunctions && ret.Op.IsExperimentalAggregator() {
+			p.addParseErrf(ret.PositionRange(), "%s() is experimental and must be enabled with --enable-feature=promql-experimental-functions", ret.Op)
+			return
+		}
 		desiredArgs = 2
 
 		ret.Param = arguments[0]
@@ -407,6 +467,182 @@ func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *Aggre
 	return ret
 }
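+
+// For example (illustrative): `limitk(2, http_requests_total)` and
+// `limit_ratio(0.5, http_requests_total)` are experimental aggregations, so
+// they parse only when --enable-feature=promql-experimental-functions is set;
+// otherwise the parse error above is recorded.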
 
+// newMap returns an empty map that collects the individual parts of a
+// FloatHistogram literal while it is being parsed.
+func (p *parser) newMap() (ret map[string]interface{}) {
+	return map[string]interface{}{}
+}
+
+// mergeMaps combines the maps that are later used to build the FloatHistogram.
+// This will merge the right map into the left map.
+func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) {
+	for key, value := range *right {
+		if _, ok := (*left)[key]; ok {
+			p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key)
+			continue
+		}
+		(*left)[key] = value
+	}
+	return left
+}
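+
+// mergeMapsSketch is a minimal usage sketch (hypothetical values) showing how
+// descriptor maps accumulate: disjoint keys merge, while a repeated key would
+// record a "duplicate key" parse error instead of overwriting the value.
+func mergeMapsSketch(p *parser) *map[string]interface{} {
+	left := p.newMap()
+	left["sum"] = 3.5
+	right := p.newMap()
+	right["count"] = 2.0
+	return p.mergeMaps(&left, &right) // now holds both "sum" and "count"
+}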
+
+func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
+	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
+		return a.Add(b)
+	})
+}
+
+func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
+	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
+		return a.Sub(b)
+	})
+}
+
+func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
+	combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error),
+) ([]SequenceValue, error) {
+	ret := make([]SequenceValue, times+1)
+	// Add an additional value (the base) for time 0, which we ignore in tests.
+	ret[0] = SequenceValue{Histogram: base}
+	cur := base
+	for i := uint64(1); i <= times; i++ {
+		if cur.Schema > inc.Schema {
+			return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
+		}
+
+		var err error
+		cur, err = combine(cur.Copy(), inc)
+		if err != nil {
+			return ret, err
+		}
+		ret[i] = SequenceValue{Histogram: cur}
+	}
+
+	return ret, nil
+}
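+
+// increaseSeriesSketch is a minimal sketch (hypothetical values) of expanding
+// an incrementing histogram series: entry 0 holds the base, and each further
+// entry adds inc once more.
+func increaseSeriesSketch(p *parser) ([]SequenceValue, error) {
+	base := &histogram.FloatHistogram{Schema: 0, Count: 4, Sum: 10}
+	inc := &histogram.FloatHistogram{Schema: 0, Count: 2, Sum: 5}
+	// Returns base, base+inc, base+2*inc, base+3*inc.
+	return p.histogramsIncreaseSeries(base, inc, 3)
+}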
+
+// buildHistogramFromMap is used in the grammar to take the individual parts of the histogram and complete it.
+func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.FloatHistogram {
+	output := &histogram.FloatHistogram{}
+
+	val, ok := (*desc)["schema"]
+	if ok {
+		schema, ok := val.(int64)
+		if ok {
+			output.Schema = int32(schema)
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing schema number: %v", val)
+		}
+	}
+
+	val, ok = (*desc)["sum"]
+	if ok {
+		sum, ok := val.(float64)
+		if ok {
+			output.Sum = sum
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing sum number: %v", val)
+		}
+	}
+	val, ok = (*desc)["count"]
+	if ok {
+		count, ok := val.(float64)
+		if ok {
+			output.Count = count
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing count number: %v", val)
+		}
+	}
+
+	val, ok = (*desc)["z_bucket"]
+	if ok {
+		bucket, ok := val.(float64)
+		if ok {
+			output.ZeroCount = bucket
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket number: %v", val)
+		}
+	}
+	val, ok = (*desc)["z_bucket_w"]
+	if ok {
+		bucketWidth, ok := val.(float64)
+		if ok {
+			output.ZeroThreshold = bucketWidth
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
+		}
+	}
+	val, ok = (*desc)["custom_values"]
+	if ok {
+		customValues, ok := val.([]float64)
+		if ok {
+			output.CustomValues = customValues
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing custom_values: %v", val)
+		}
+	}
+
+	val, ok = (*desc)["counter_reset_hint"]
+	if ok {
+		resetHint, ok := val.(Item)
+
+		if ok {
+			switch resetHint.Typ {
+			case UNKNOWN_COUNTER_RESET:
+				output.CounterResetHint = histogram.UnknownCounterReset
+			case COUNTER_RESET:
+				output.CounterResetHint = histogram.CounterReset
+			case NOT_COUNTER_RESET:
+				output.CounterResetHint = histogram.NotCounterReset
+			case GAUGE_TYPE:
+				output.CounterResetHint = histogram.GaugeType
+			default:
+				p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: unknown value %v", resetHint.Typ)
+			}
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: %v", val)
+		}
+	}
+
+	buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
+	output.PositiveBuckets = buckets
+	output.PositiveSpans = spans
+
+	buckets, spans = p.buildHistogramBucketsAndSpans(desc, "n_buckets", "n_offset")
+	output.NegativeBuckets = buckets
+	output.NegativeSpans = spans
+
+	return output
+}
+
+func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, bucketsKey, offsetKey string,
+) (buckets []float64, spans []histogram.Span) {
+	bucketCount := 0
+	val, ok := (*desc)[bucketsKey]
+	if ok {
+		val, ok := val.([]float64)
+		if ok {
+			buckets = val
+			bucketCount = len(buckets)
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing %s float array: %v", bucketsKey, val)
+		}
+	}
+	offset := int32(0)
+	val, ok = (*desc)[offsetKey]
+	if ok {
+		val, ok := val.(int64)
+		if ok {
+			offset = int32(val)
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing %s number: %v", offsetKey, val)
+		}
+	}
+	if bucketCount > 0 {
+		spans = []histogram.Span{{Offset: offset, Length: uint32(bucketCount)}}
+	}
+	return
+}
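+
+// buildHistogramSketch is a minimal sketch (hypothetical values) of the
+// descriptor map assembled for a histogram literal such as
+// {{schema:0 sum:5 count:4 buckets:[1 2 1]}}.
+func buildHistogramSketch(p *parser) *histogram.FloatHistogram {
+	desc := map[string]interface{}{
+		"schema":  int64(0), // the lexer yields integers as int64
+		"sum":     5.0,
+		"count":   4.0,
+		"buckets": []float64{1, 2, 1},
+	}
+	// Produces PositiveBuckets [1 2 1] covered by one span of length 3.
+	return p.buildHistogramFromMap(&desc)
+}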
+
 // number parses a number.
 func (p *parser) number(val string) float64 {
 	n, err := strconv.ParseInt(val, 0, 64)
@@ -429,7 +665,7 @@ func (p *parser) expectType(node Node, want ValueType, context string) {
 	}
 }
 
-// checkAST checks the sanity of the provided AST. This includes type checking.
+// checkAST checks the validity of the provided AST. This includes type checking.
 func (p *parser) checkAST(node Node) (typ ValueType) {
 	// For expressions the type is determined by their Type function.
 	// Lists do not have a type but are not invalid either.
@@ -463,7 +699,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 			p.addParseErrf(n.PositionRange(), "aggregation operator expected in aggregation expression but got %q", n.Op)
 		}
 		p.expectType(n.Expr, ValueTypeVector, "aggregation expression")
-		if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE {
+		if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE || n.Op == LIMITK || n.Op == LIMIT_RATIO {
 			p.expectType(n.Param, ValueTypeScalar, "aggregation parameter")
 		}
 		if n.Op == COUNT_VALUES {
@@ -476,7 +712,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 
 		// opRange returns the PositionRange of the operator part of the BinaryExpr.
 		// This is made a function instead of a variable, so it is lazily evaluated on demand.
-		opRange := func() (r PositionRange) {
+		opRange := func() (r posrange.PositionRange) {
 			// Remove whitespace at the beginning and end of the range.
 			for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ {
 			}
@@ -515,20 +751,18 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 			p.addParseErrf(n.RHS.PositionRange(), "binary expression must contain only scalar and instant vector types")
 		}
 
-		if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil {
+		switch {
+		case (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil:
 			if len(n.VectorMatching.MatchingLabels) > 0 {
 				p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors")
 			}
 			n.VectorMatching = nil
-		} else {
-			// Both operands are Vectors.
-			if n.Op.IsSetOperator() {
-				if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne {
-					p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op)
-				}
-				if n.VectorMatching.Card != CardManyToMany {
-					p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many")
-				}
+		case n.Op.IsSetOperator(): // Both operands are Vectors.
+			if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne {
+				p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op)
+			}
+			if n.VectorMatching.Card != CardManyToMany {
+				p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many")
 			}
 		}
 
@@ -551,6 +785,19 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 			}
 		}
 
+		if n.Func.Name == "info" && len(n.Args) > 1 {
+			// Check the type is correct first
+			if n.Args[1].Type() != ValueTypeVector {
+				p.addParseErrf(node.PositionRange(), "expected type %s in %s, got %s", DocumentedType(ValueTypeVector), fmt.Sprintf("call to function %q", n.Func.Name), DocumentedType(n.Args[1].Type()))
+			}
+			// Check the vector selector in the input doesn't contain a metric name
+			if n.Args[1].(*VectorSelector).Name != "" {
+				p.addParseErrf(n.Args[1].PositionRange(), "expected label selectors only, got vector selector instead")
+			}
+			// Set Vector Selector flag to bypass empty matcher check
+			n.Args[1].(*VectorSelector).BypassEmptyMatcherCheck = true
+		}
+
 		for i, arg := range n.Args {
 			if i >= len(n.Func.ArgTypes) {
 				if n.Func.Variadic == 0 {
@@ -577,34 +824,39 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 	case *SubqueryExpr:
 		ty := p.checkAST(n.Expr)
 		if ty != ValueTypeVector {
-			p.addParseErrf(n.PositionRange(), "subquery is only allowed on instant vector, got %s in %q instead", ty, n.String())
+			p.addParseErrf(n.PositionRange(), "subquery is only allowed on instant vector, got %s instead", ty)
 		}
 	case *MatrixSelector:
 		p.checkAST(n.VectorSelector)
 
 	case *VectorSelector:
-		// A Vector selector must contain at least one non-empty matcher to prevent
-		// implicit selection of all metrics (e.g. by a typo).
-		notEmpty := false
-		for _, lm := range n.LabelMatchers {
-			if lm != nil && !lm.Matches("") {
-				notEmpty = true
-				break
-			}
-		}
-		if !notEmpty {
-			p.addParseErrf(n.PositionRange(), "vector selector must contain at least one non-empty matcher")
-		}
-
 		if n.Name != "" {
 			// In this case the last LabelMatcher is checking for the metric name
 			// set outside the braces. This checks if the name has already been set
-			// previously
+			// previously.
 			for _, m := range n.LabelMatchers[0 : len(n.LabelMatchers)-1] {
 				if m != nil && m.Name == labels.MetricName {
 					p.addParseErrf(n.PositionRange(), "metric name must not be set twice: %q or %q", n.Name, m.Value)
 				}
 			}
+
+			// Skip the check for non-empty matchers because an explicit
+			// metric name is a non-empty matcher.
+			break
+		}
+		if !n.BypassEmptyMatcherCheck {
+			// A Vector selector must contain at least one non-empty matcher to prevent
+			// implicit selection of all metrics (e.g. by a typo).
+			notEmpty := false
+			for _, lm := range n.LabelMatchers {
+				if lm != nil && !lm.Matches("") {
+					notEmpty = true
+					break
+				}
+			}
+			if !notEmpty {
+				p.addParseErrf(n.PositionRange(), "vector selector must contain at least one non-empty matcher")
+			}
 		}
 
 	case *NumberLiteral, *StringLiteral:
@@ -644,10 +896,9 @@ func (p *parser) parseGenerated(startSymbol ItemType) interface{} {
 	p.yyParser.Parse(p)
 
 	return p.generatedParserResult
-
 }
 
-func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.Matcher {
+func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher {
 	op := operator.Typ
 	val := p.unquoteString(value.Val)
 
@@ -676,34 +927,136 @@ func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.
 	return m
 }
 
+// newMetricNameMatcher returns an equality matcher on the metric name for the
+// given item, recording a parse error if the matcher cannot be built.
+func (p *parser) newMetricNameMatcher(value Item) *labels.Matcher {
+	m, err := labels.NewMatcher(labels.MatchEqual, labels.MetricName, value.Val)
+	if err != nil {
+		p.addParseErr(value.PositionRange(), err)
+	}
+
+	return m
+}
+
+// addOffset is used to set the offset in the generated parser.
 func (p *parser) addOffset(e Node, offset time.Duration) {
-	var offsetp *time.Duration
-	var endPosp *Pos
+	var orgoffsetp *time.Duration
+	var endPosp *posrange.Pos
 
 	switch s := e.(type) {
 	case *VectorSelector:
-		offsetp = &s.Offset
+		orgoffsetp = &s.OriginalOffset
 		endPosp = &s.PosRange.End
 	case *MatrixSelector:
-		if vs, ok := s.VectorSelector.(*VectorSelector); ok {
-			offsetp = &vs.Offset
+		vs, ok := s.VectorSelector.(*VectorSelector)
+		if !ok {
+			p.addParseErrf(e.PositionRange(), "ranges only allowed for vector selectors")
+			return
 		}
+		orgoffsetp = &vs.OriginalOffset
 		endPosp = &s.EndPos
 	case *SubqueryExpr:
-		offsetp = &s.Offset
+		orgoffsetp = &s.OriginalOffset
 		endPosp = &s.EndPos
 	default:
-		p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant or range selector, but follows a %T instead", e)
+		p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery")
 		return
 	}
 
 	// It is already ensured by the parseDuration func that there will never be a zero offset modifier.
-	if *offsetp != 0 {
+	switch {
+	case *orgoffsetp != 0:
 		p.addParseErrf(e.PositionRange(), "offset may not be set multiple times")
-	} else if offsetp != nil {
-		*offsetp = offset
+	case orgoffsetp != nil:
+		*orgoffsetp = offset
+	}
+
+	*endPosp = p.lastClosing
+}
+
+// setTimestamp is used to set the timestamp from the @ modifier in the generated parser.
+func (p *parser) setTimestamp(e Node, ts float64) {
+	if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) ||
+		ts >= float64(math.MaxInt64) || ts <= float64(math.MinInt64) {
+		p.addParseErrf(e.PositionRange(), "timestamp out of bounds for @ modifier: %f", ts)
+	}
+	var timestampp **int64
+	var endPosp *posrange.Pos
+
+	timestampp, _, endPosp, ok := p.getAtModifierVars(e)
+	if !ok {
+		return
+	}
+
+	if timestampp != nil {
+		*timestampp = new(int64)
+		**timestampp = timestamp.FromFloatSeconds(ts)
+	}
+
+	*endPosp = p.lastClosing
+}
+
+// setAtModifierPreprocessor is used to set the preprocessor for the @ modifier.
+func (p *parser) setAtModifierPreprocessor(e Node, op Item) {
+	_, preprocp, endPosp, ok := p.getAtModifierVars(e)
+	if !ok {
+		return
+	}
+
+	if preprocp != nil {
+		*preprocp = op.Typ
 	}
 
 	*endPosp = p.lastClosing
+}
+
+func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *posrange.Pos, bool) {
+	var (
+		timestampp **int64
+		preprocp   *ItemType
+		endPosp    *posrange.Pos
+	)
+	switch s := e.(type) {
+	case *VectorSelector:
+		timestampp = &s.Timestamp
+		preprocp = &s.StartOrEnd
+		endPosp = &s.PosRange.End
+	case *MatrixSelector:
+		vs, ok := s.VectorSelector.(*VectorSelector)
+		if !ok {
+			p.addParseErrf(e.PositionRange(), "ranges only allowed for vector selectors")
+			return nil, nil, nil, false
+		}
+		preprocp = &vs.StartOrEnd
+		timestampp = &vs.Timestamp
+		endPosp = &s.EndPos
+	case *SubqueryExpr:
+		preprocp = &s.StartOrEnd
+		timestampp = &s.Timestamp
+		endPosp = &s.EndPos
+	default:
+		p.addParseErrf(e.PositionRange(), "@ modifier must be preceded by an instant vector selector or range vector selector or a subquery")
+		return nil, nil, nil, false
+	}
+
+	if *timestampp != nil || (*preprocp) == START || (*preprocp) == END {
+		p.addParseErrf(e.PositionRange(), "@ <timestamp> may not be set multiple times")
+		return nil, nil, nil, false
+	}
+
+	return timestampp, preprocp, endPosp, true
+}
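+
+// The forms accepted by the @ modifier handling above are, for example
+// (illustrative):
+//
+//	foo @ 1609746000                // instant selector at a fixed time
+//	foo[5m] @ start()               // range selector at the query start
+//	rate(foo[5m])[10m:1m] @ end()   // subquery at the query end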
 
+// MustLabelMatcher wraps labels.NewMatcher and panics if the matcher cannot
+// be constructed.
+func MustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher {
+	m, err := labels.NewMatcher(mt, name, val)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+// MustGetFunction returns the function with the given name and panics if it
+// does not exist.
+func MustGetFunction(name string) *Function {
+	f, ok := getFunction(name, Functions)
+	if !ok {
+		panic(fmt.Errorf("function %q does not exist", name))
+	}
+	return f
 }
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go b/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go
new file mode 100644
index 0000000000000000000000000000000000000000..531fd8a30c00859cd2ae088da29947954dff06d1
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go
@@ -0,0 +1,54 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package posrange is used to report a position in query strings for error
+// and warning messages.
+package posrange
+
+import "fmt"
+
+// Pos is the position in a string.
+// Negative numbers indicate undefined positions.
+type Pos int
+
+// PositionRange describes a position in the input string of the parser.
+type PositionRange struct {
+	Start Pos
+	End   Pos
+}
+
+// StartPosInput uses the query string to convert the PositionRange into a
+// line:col string, indicating when this is not possible if the query is empty
+// or the position is invalid. When this is used to convert ParseErr to a string,
+// lineOffset is an additional line offset to be added, and is only used inside
+// unit tests.
+func (p PositionRange) StartPosInput(query string, lineOffset int) string {
+	if query == "" {
+		return "unknown position"
+	}
+	pos := int(p.Start)
+	if pos < 0 || pos > len(query) {
+		return "invalid position"
+	}
+
+	lastLineBreak := -1
+	line := lineOffset + 1
+	for i, c := range query[:pos] {
+		if c == '\n' {
+			lastLineBreak = i
+			line++
+		}
+	}
+	col := pos - lastLineBreak
+	return fmt.Sprintf("%d:%d", line, col)
+}
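+
+// startPosInputSketch is a minimal sketch (illustrative values) of converting
+// a position into a line:col string.
+func startPosInputSketch() string {
+	pr := PositionRange{Start: 8, End: 11}
+	// The newline at index 5 places position 8 on line 2, column 3.
+	return pr.StartPosInput("rate(\nfoo[5m])", 0) // "2:3"
+}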
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go
new file mode 100644
index 0000000000000000000000000000000000000000..9870d6da748fb2ab1c536025a48e82a2ebd74238
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go
@@ -0,0 +1,166 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Approach
+// --------
+// When a PromQL query is parsed, it is converted into PromQL AST,
+// which is a nested structure of nodes. Each node has a depth/level
+// (distance from the root), that is passed by its parent.
+//
+// While prettifying, a Node considers 2 things:
+// 1. Did the current Node's parent add a new line?
+// 2. Does the current Node need to be prettified?
+//
+// The level of a Node determines if it should be indented or not.
+// The answer to 1 is NO if the level passed is 0. This means the
+// parent Node did not apply a new line, so the current Node must not
+// apply any indentation as a prefix.
+// If level > 0, a new line was applied by the parent, so the current Node
+// should prefix an indentation before writing any of its content. This indentation
+// will be ([level/depth of current Node] * "  ").
+//
+// The answer to 2 is YES if the normalized length of the current Node exceeds
+// the maxCharactersPerLine limit. Hence, it applies the indentation equal to
+// its depth and increments the level by 1 before passing down the child.
+// If the answer is NO, the current Node returns the normalized string value of itself.
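+//
+// For example (illustrative, assuming the line limit is exceeded),
+//
+//	sum by (job) (rate(http_requests_total{handler="/api",status="500"}[5m]))
+//
+// is rendered as
+//
+//	sum by (job) (
+//	  rate(http_requests_total{handler="/api",status="500"}[5m])
+//	)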
+
+var maxCharactersPerLine = 100
+
+// Prettify returns the prettified (multi-line, indented) form of the given
+// node.
+func Prettify(n Node) string {
+	return n.Pretty(0)
+}
+
+func (e *AggregateExpr) Pretty(level int) string {
+	s := indent(level)
+	if !needsSplit(e) {
+		s += e.String()
+		return s
+	}
+
+	s += e.getAggOpStr()
+	s += "(\n"
+
+	if e.Op.IsAggregatorWithParam() {
+		s += fmt.Sprintf("%s,\n", e.Param.Pretty(level+1))
+	}
+	s += fmt.Sprintf("%s\n%s)", e.Expr.Pretty(level+1), indent(level))
+	return s
+}
+
+func (e *BinaryExpr) Pretty(level int) string {
+	s := indent(level)
+	if !needsSplit(e) {
+		s += e.String()
+		return s
+	}
+	returnBool := ""
+	if e.ReturnBool {
+		returnBool = " bool"
+	}
+
+	matching := e.getMatchingStr()
+	return fmt.Sprintf("%s\n%s%s%s%s\n%s", e.LHS.Pretty(level+1), indent(level), e.Op, returnBool, matching, e.RHS.Pretty(level+1))
+}
+
+func (e *Call) Pretty(level int) string {
+	s := indent(level)
+	if !needsSplit(e) {
+		s += e.String()
+		return s
+	}
+	s += fmt.Sprintf("%s(\n%s\n%s)", e.Func.Name, e.Args.Pretty(level+1), indent(level))
+	return s
+}
+
+func (e *EvalStmt) Pretty(_ int) string {
+	return "EVAL " + e.Expr.String()
+}
+
+func (e Expressions) Pretty(level int) string {
+	// Do not prefix the indent since the respective nodes will indent themselves.
+	s := ""
+	for i := range e {
+		s += fmt.Sprintf("%s,\n", e[i].Pretty(level))
+	}
+	return s[:len(s)-2]
+}
+
+func (e *ParenExpr) Pretty(level int) string {
+	s := indent(level)
+	if !needsSplit(e) {
+		s += e.String()
+		return s
+	}
+	return fmt.Sprintf("%s(\n%s\n%s)", s, e.Expr.Pretty(level+1), indent(level))
+}
+
+func (e *StepInvariantExpr) Pretty(level int) string {
+	return e.Expr.Pretty(level)
+}
+
+func (e *MatrixSelector) Pretty(level int) string {
+	return getCommonPrefixIndent(level, e)
+}
+
+func (e *SubqueryExpr) Pretty(level int) string {
+	if !needsSplit(e) {
+		return e.String()
+	}
+	return fmt.Sprintf("%s%s", e.Expr.Pretty(level), e.getSubqueryTimeSuffix())
+}
+
+func (e *VectorSelector) Pretty(level int) string {
+	return getCommonPrefixIndent(level, e)
+}
+
+func (e *NumberLiteral) Pretty(level int) string {
+	return getCommonPrefixIndent(level, e)
+}
+
+func (e *StringLiteral) Pretty(level int) string {
+	return getCommonPrefixIndent(level, e)
+}
+
+func (e *UnaryExpr) Pretty(level int) string {
+	child := e.Expr.Pretty(level)
+	// Remove the indent prefix from child since we attach the prefix indent before Op.
+	child = strings.TrimSpace(child)
+	return fmt.Sprintf("%s%s%s", indent(level), e.Op, child)
+}
+
+func getCommonPrefixIndent(level int, current Node) string {
+	return fmt.Sprintf("%s%s", indent(level), current.String())
+}
+
+// needsSplit checks whether the normalized (String()) form of the node
+// exceeds maxCharactersPerLine and therefore needs to be split. Using the
+// normalized form avoids counting any trailing whitespace.
+func needsSplit(n Node) bool {
+	if n == nil {
+		return false
+	}
+	return len(n.String()) > maxCharactersPerLine
+}
+
+const indentString = "  "
+
+// indent adds the indentString n number of times.
+func indent(n int) string {
+	return strings.Repeat(indentString, n)
+}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier_rules.md b/vendor/github.com/prometheus/prometheus/promql/parser/prettier_rules.md
new file mode 100644
index 0000000000000000000000000000000000000000..46c5e51ef9ba560e9d3c3df2631a4496bd56b719
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier_rules.md
@@ -0,0 +1,16 @@
+# Prettifying PromQL expressions
+This file contains rules for prettifying PromQL expressions.
+
+Note: The current version of prettier does not preserve comments.
+
+### Keywords
+`max_characters_per_line`: Maximum number of characters that will be allowed on a single line in a prettified PromQL expression.
+
+### Rules
+1. A node exceeding the `max_characters_per_line` will qualify for a split unless
+   1. It is a `MatrixSelector`
+   2. It is a `VectorSelector`. Label sets in a `VectorSelector` will be on the same line as the metric_name, separated by commas and a space
+   Note: Label groupings like `by`, `without`, `on`, `ignoring` will remain on the same line as their parent node
+2. Nodes that are nested within another node will be prettified only if they exceed the `max_characters_per_line`
+3. Expressions like `sum(expression) without (label_matchers)` will be modified to `sum without(label_matchers) (expression)`
+4. Function call args will be split onto different lines if they exceed the `max_characters_per_line`
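+
+### Example
+An illustrative before/after, assuming the expression exceeds `max_characters_per_line` (rule 3 moves the grouping before the parentheses, and the expression is split):
+
+```
+sum(rate(http_requests_total{job="api",status="500"}[5m])) without (instance)
+```
+
+becomes
+
+```
+sum without (instance) (
+  rate(http_requests_total{job="api",status="500"}[5m])
+)
+```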
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
index eef4aa8e418a9613cb90b1b497381f881c95e8bc..6f234a029012e2e224debf13f43ff6f029a58314 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
@@ -14,14 +14,16 @@
 package parser
 
 import (
+	"bytes"
 	"fmt"
 	"sort"
+	"strconv"
 	"strings"
 	"time"
 
 	"github.com/prometheus/common/model"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/labels"
 )
 
 // Tree returns a string of the tree structure of the given node.
@@ -62,16 +64,7 @@ func (es Expressions) String() (s string) {
 }
 
 func (node *AggregateExpr) String() string {
-	aggrString := node.Op.String()
-
-	if node.Without {
-		aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", "))
-	} else {
-		if len(node.Grouping) > 0 {
-			aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", "))
-		}
-	}
-
+	aggrString := node.getAggOpStr()
 	aggrString += "("
 	if node.Op.IsAggregatorWithParam() {
 		aggrString += fmt.Sprintf("%s, ", node.Param)
@@ -81,65 +74,165 @@ func (node *AggregateExpr) String() string {
 	return aggrString
 }
 
-func (node *BinaryExpr) String() string {
-	returnBool := ""
+func (node *AggregateExpr) ShortString() string {
+	aggrString := node.getAggOpStr()
+	return aggrString
+}
+
+func (node *AggregateExpr) getAggOpStr() string {
+	aggrString := node.Op.String()
+
+	switch {
+	case node.Without:
+		aggrString += fmt.Sprintf(" without (%s) ", joinLabels(node.Grouping))
+	case len(node.Grouping) > 0:
+		aggrString += fmt.Sprintf(" by (%s) ", joinLabels(node.Grouping))
+	}
+
+	return aggrString
+}
+
+func joinLabels(ss []string) string {
+	var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+	b := bytes.NewBuffer(bytea[:0])
+
+	for i, s := range ss {
+		if i > 0 {
+			b.WriteString(", ")
+		}
+		if !model.IsValidLegacyMetricName(string(model.LabelValue(s))) {
+			b.Write(strconv.AppendQuote(b.AvailableBuffer(), s))
+		} else {
+			b.WriteString(s)
+		}
+	}
+	return b.String()
+}
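+
+// joinLabelsSketch is a minimal sketch (hypothetical input) of the quoting
+// rule: names that are not valid legacy label names are quoted so the output
+// stays parseable.
+func joinLabelsSketch() string {
+	// Yields: job, "label with spaces"
+	return joinLabels([]string{"job", "label with spaces"})
+}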
+
+func (node *BinaryExpr) returnBool() string {
 	if node.ReturnBool {
-		returnBool = " bool"
+		return " bool"
 	}
+	return ""
+}
+
+func (node *BinaryExpr) String() string {
+	matching := node.getMatchingStr()
+	return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, node.returnBool(), matching, node.RHS)
+}
 
+func (node *BinaryExpr) ShortString() string {
+	return fmt.Sprintf("%s%s%s", node.Op, node.returnBool(), node.getMatchingStr())
+}
+
+func (node *BinaryExpr) getMatchingStr() string {
 	matching := ""
 	vm := node.VectorMatching
 	if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) {
+		vmTag := "ignoring"
 		if vm.On {
-			matching = fmt.Sprintf(" on(%s)", strings.Join(vm.MatchingLabels, ", "))
-		} else {
-			matching = fmt.Sprintf(" ignoring(%s)", strings.Join(vm.MatchingLabels, ", "))
+			vmTag = "on"
 		}
+		matching = fmt.Sprintf(" %s (%s)", vmTag, strings.Join(vm.MatchingLabels, ", "))
+
 		if vm.Card == CardManyToOne || vm.Card == CardOneToMany {
-			matching += " group_"
+			vmCard := "right"
 			if vm.Card == CardManyToOne {
-				matching += "left"
-			} else {
-				matching += "right"
+				vmCard = "left"
 			}
-			matching += fmt.Sprintf("(%s)", strings.Join(vm.Include, ", "))
+			matching += fmt.Sprintf(" group_%s (%s)", vmCard, strings.Join(vm.Include, ", "))
 		}
 	}
-	return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS)
+	return matching
 }
 
 func (node *Call) String() string {
 	return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args)
 }
 
-func (node *MatrixSelector) String() string {
+func (node *Call) ShortString() string {
+	return node.Func.Name
+}
+
+func (node *MatrixSelector) atOffset() (string, string) {
 	// Inspect the Vector selector's offset and @ modifier (no copy needed here).
-	vecSelector := *node.VectorSelector.(*VectorSelector)
+	vecSelector := node.VectorSelector.(*VectorSelector)
 	offset := ""
-	if vecSelector.Offset != time.Duration(0) {
-		offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.Offset))
+	switch {
+	case vecSelector.OriginalOffset > time.Duration(0):
+		offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset))
+	case vecSelector.OriginalOffset < time.Duration(0):
+		offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset))
+	}
+	at := ""
+	switch {
+	case vecSelector.Timestamp != nil:
+		at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0)
+	case vecSelector.StartOrEnd == START:
+		at = " @ start()"
+	case vecSelector.StartOrEnd == END:
+		at = " @ end()"
 	}
+	return at, offset
+}
+
+func (node *MatrixSelector) String() string {
+	at, offset := node.atOffset()
+	// Copy the Vector selector before changing the offset
+	vecSelector := *node.VectorSelector.(*VectorSelector)
+	// Do not print the @ and offset twice.
+	offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd
+	vecSelector.OriginalOffset = 0
+	vecSelector.Timestamp = nil
+	vecSelector.StartOrEnd = 0
+
+	str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), model.Duration(node.Range), at, offset)
 
-	// Do not print the offset twice.
-	vecSelector.Offset = 0
+	vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, atVal, preproc
+
+	return str
+}
 
-	return fmt.Sprintf("%s[%s]%s", vecSelector.String(), model.Duration(node.Range), offset)
+func (node *MatrixSelector) ShortString() string {
+	at, offset := node.atOffset()
+	return fmt.Sprintf("[%s]%s%s", model.Duration(node.Range), at, offset)
 }
 
 func (node *SubqueryExpr) String() string {
+	return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix())
+}
+
+func (node *SubqueryExpr) ShortString() string {
+	return node.getSubqueryTimeSuffix()
+}
+
+// getSubqueryTimeSuffix returns the '[<range>:<step>] @ <timestamp> offset <offset>' suffix of the subquery.
+func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
 	step := ""
 	if node.Step != 0 {
 		step = model.Duration(node.Step).String()
 	}
 	offset := ""
-	if node.Offset != time.Duration(0) {
-		offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset))
+	switch {
+	case node.OriginalOffset > time.Duration(0):
+		offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
+	case node.OriginalOffset < time.Duration(0):
+		offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
+	}
+	at := ""
+	switch {
+	case node.Timestamp != nil:
+		at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0)
+	case node.StartOrEnd == START:
+		at = " @ start()"
+	case node.StartOrEnd == END:
+		at = " @ end()"
 	}
-	return fmt.Sprintf("%s[%s:%s]%s", node.Expr.String(), model.Duration(node.Range), step, offset)
+	return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset)
 }
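+
+// For example (illustrative): a 10m range, 1m step, an end() preprocessor,
+// and a 5m offset yield the suffix "[10m:1m] @ end() offset 5m"; the @ part
+// is always printed before the offset.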
 
 func (node *NumberLiteral) String() string {
-	return fmt.Sprint(node.Val)
+	return strconv.FormatFloat(node.Val, 'f', -1, 64)
 }
 
 func (node *ParenExpr) String() string {
@@ -154,23 +247,42 @@ func (node *UnaryExpr) String() string {
 	return fmt.Sprintf("%s%s", node.Op, node.Expr)
 }
 
+func (node *UnaryExpr) ShortString() string {
+	return node.Op.String()
+}
+
 func (node *VectorSelector) String() string {
-	labelStrings := make([]string, 0, len(node.LabelMatchers)-1)
+	var labelStrings []string
+	if len(node.LabelMatchers) > 1 {
+		labelStrings = make([]string, 0, len(node.LabelMatchers)-1)
+	}
 	for _, matcher := range node.LabelMatchers {
-		// Only include the __name__ label if its equality matching and matches the name.
-		if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name {
+		// Only include the __name__ label if it's equality matching and matches the name, but don't skip if it's an explicit empty name matcher.
+		if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name && matcher.Value != "" {
 			continue
 		}
 		labelStrings = append(labelStrings, matcher.String())
 	}
 	offset := ""
-	if node.Offset != time.Duration(0) {
-		offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset))
+	switch {
+	case node.OriginalOffset > time.Duration(0):
+		offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
+	case node.OriginalOffset < time.Duration(0):
+		offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
+	}
+	at := ""
+	switch {
+	case node.Timestamp != nil:
+		at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0)
+	case node.StartOrEnd == START:
+		at = " @ start()"
+	case node.StartOrEnd == END:
+		at = " @ end()"
 	}
 
 	if len(labelStrings) == 0 {
-		return fmt.Sprintf("%s%s", node.Name, offset)
+		return fmt.Sprintf("%s%s%s", node.Name, at, offset)
 	}
 	sort.Strings(labelStrings)
-	return fmt.Sprintf("%s{%s}%s", node.Name, strings.Join(labelStrings, ","), offset)
+	return fmt.Sprintf("%s{%s}%s%s", node.Name, strings.Join(labelStrings, ","), at, offset)
 }
diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go
index feca1d91ed4b2a395879d5ab342c6ed1318a1f89..e847c10e61a40682e856c732a00908b544116ff0 100644
--- a/vendor/github.com/prometheus/prometheus/storage/buffer.go
+++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go
@@ -14,19 +14,25 @@
 package storage
 
 import (
+	"fmt"
 	"math"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 )
 
 // BufferedSeriesIterator wraps an iterator with a look-back buffer.
 type BufferedSeriesIterator struct {
+	hReader  histogram.Histogram
+	fhReader histogram.FloatHistogram
+
 	it    chunkenc.Iterator
 	buf   *sampleRing
 	delta int64
 
-	lastTime int64
-	ok       bool
+	lastTime  int64
+	valueType chunkenc.ValueType
 }
 
 // NewBuffer returns a new iterator that buffers the values within the time range
@@ -40,7 +46,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator {
 // time range of the current element and the duration of delta before.
 func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
 	bit := &BufferedSeriesIterator{
-		buf:   newSampleRing(delta, 16),
+		buf:   newSampleRing(delta, 0, chunkenc.ValNone),
 		delta: delta,
 	}
 	bit.Reset(it)
@@ -53,10 +59,9 @@ func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterato
 func (b *BufferedSeriesIterator) Reset(it chunkenc.Iterator) {
 	b.it = it
 	b.lastTime = math.MinInt64
-	b.ok = true
 	b.buf.reset()
 	b.buf.delta = b.delta
-	it.Next()
+	b.valueType = it.Next()
 }
 
 // ReduceDelta lowers the buffered time delta, for the current SeriesIterator only.
@@ -66,99 +71,237 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {
 
 // PeekBack returns the nth previous element of the iterator. If there is none buffered,
 // ok is false.
-func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, ok bool) {
+func (b *BufferedSeriesIterator) PeekBack(n int) (sample chunks.Sample, ok bool) {
 	return b.buf.nthLast(n)
 }
 
 // Buffer returns an iterator over the buffered data. Invalidates previously
 // returned iterators.
-func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator {
+func (b *BufferedSeriesIterator) Buffer() *SampleRingIterator {
 	return b.buf.iterator()
 }
 
 // Seek advances the iterator to the element at time t or greater.
-func (b *BufferedSeriesIterator) Seek(t int64) bool {
+func (b *BufferedSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	t0 := t - b.buf.delta
 
 	// If the delta would cause us to seek backwards, preserve the buffer
 	// and just continue regular advancement while filling the buffer on the way.
-	if t0 > b.lastTime {
+	if b.valueType != chunkenc.ValNone && t0 > b.lastTime {
 		b.buf.reset()
 
-		b.ok = b.it.Seek(t0)
-		if !b.ok {
-			return false
+		b.valueType = b.it.Seek(t0)
+		switch b.valueType {
+		case chunkenc.ValNone:
+			return chunkenc.ValNone
+		case chunkenc.ValFloat, chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
+			b.lastTime = b.AtT()
+		default:
+			panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
 		}
-		b.lastTime, _ = b.Values()
 	}
 
 	if b.lastTime >= t {
-		return true
+		return b.valueType
 	}
-	for b.Next() {
-		if b.lastTime >= t {
-			return true
+	for {
+		if b.valueType = b.Next(); b.valueType == chunkenc.ValNone || b.lastTime >= t {
+			return b.valueType
 		}
 	}
-
-	return false
 }
 
 // Next advances the iterator to the next element.
-func (b *BufferedSeriesIterator) Next() bool {
-	if !b.ok {
-		return false
-	}
-
+func (b *BufferedSeriesIterator) Next() chunkenc.ValueType {
 	// Add current element to buffer before advancing.
-	b.buf.add(b.it.At())
-
-	b.ok = b.it.Next()
-	if b.ok {
-		b.lastTime, _ = b.Values()
+	switch b.valueType {
+	case chunkenc.ValNone:
+		return chunkenc.ValNone
+	case chunkenc.ValFloat:
+		t, f := b.it.At()
+		b.buf.addF(fSample{t: t, f: f})
+	case chunkenc.ValHistogram:
+		t, h := b.it.AtHistogram(&b.hReader)
+		b.buf.addH(hSample{t: t, h: h})
+	case chunkenc.ValFloatHistogram:
+		t, fh := b.it.AtFloatHistogram(&b.fhReader)
+		b.buf.addFH(fhSample{t: t, fh: fh})
+	default:
+		panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
 	}
 
-	return b.ok
+	b.valueType = b.it.Next()
+	if b.valueType != chunkenc.ValNone {
+		b.lastTime = b.AtT()
+	}
+	return b.valueType
 }
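+
+// lookbackSketch is a minimal usage sketch (illustrative bounds): after
+// Seek(t), Buffer() iterates the samples buffered in [t-delta, t).
+func lookbackSketch(it chunkenc.Iterator) {
+	b := NewBufferIterator(it, 60_000) // 1m lookback window
+	if b.Seek(120_000) != chunkenc.ValNone {
+		buf := b.Buffer()
+		for buf.Next() != chunkenc.ValNone {
+			_ = buf.AtT() // timestamps fall in [60_000, 120_000)
+		}
+	}
+}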
 
-// Values returns the current element of the iterator.
-func (b *BufferedSeriesIterator) Values() (int64, float64) {
+// At returns the current float element of the iterator.
+func (b *BufferedSeriesIterator) At() (int64, float64) {
 	return b.it.At()
 }
 
+// AtHistogram returns the current histogram element of the iterator.
+func (b *BufferedSeriesIterator) AtHistogram(fh *histogram.Histogram) (int64, *histogram.Histogram) {
+	return b.it.AtHistogram(fh)
+}
+
+// AtFloatHistogram returns the current float-histogram element of the iterator.
+func (b *BufferedSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	return b.it.AtFloatHistogram(fh)
+}
+
+// AtT returns the current timestamp of the iterator.
+func (b *BufferedSeriesIterator) AtT() int64 {
+	return b.it.AtT()
+}
+
 // Err returns the last encountered error.
 func (b *BufferedSeriesIterator) Err() error {
 	return b.it.Err()
 }
 
-type sample struct {
+type fSample struct {
 	t int64
-	v float64
+	f float64
 }
 
-func (s sample) T() int64 {
+func (s fSample) T() int64 {
 	return s.t
 }
 
-func (s sample) V() float64 {
-	return s.v
+func (s fSample) F() float64 {
+	return s.f
+}
+
+func (s fSample) H() *histogram.Histogram {
+	panic("H() called for fSample")
+}
+
+func (s fSample) FH() *histogram.FloatHistogram {
+	panic("FH() called for fSample")
+}
+
+func (s fSample) Type() chunkenc.ValueType {
+	return chunkenc.ValFloat
+}
+
+func (s fSample) Copy() chunks.Sample {
+	return s
+}
+
+type hSample struct {
+	t int64
+	h *histogram.Histogram
+}
+
+func (s hSample) T() int64 {
+	return s.t
+}
+
+func (s hSample) F() float64 {
+	panic("F() called for hSample")
+}
+
+func (s hSample) H() *histogram.Histogram {
+	return s.h
+}
+
+func (s hSample) FH() *histogram.FloatHistogram {
+	return s.h.ToFloat(nil)
+}
+
+func (s hSample) Type() chunkenc.ValueType {
+	return chunkenc.ValHistogram
+}
+
+func (s hSample) Copy() chunks.Sample {
+	return hSample{t: s.t, h: s.h.Copy()}
+}
+
+type fhSample struct {
+	t  int64
+	fh *histogram.FloatHistogram
+}
+
+func (s fhSample) T() int64 {
+	return s.t
+}
+
+func (s fhSample) F() float64 {
+	panic("F() called for fhSample")
+}
+
+func (s fhSample) H() *histogram.Histogram {
+	panic("H() called for fhSample")
+}
+
+func (s fhSample) FH() *histogram.FloatHistogram {
+	return s.fh
+}
+
+func (s fhSample) Type() chunkenc.ValueType {
+	return chunkenc.ValFloatHistogram
+}
+
+func (s fhSample) Copy() chunks.Sample {
+	return fhSample{t: s.t, fh: s.fh.Copy()}
 }
 
 type sampleRing struct {
 	delta int64
 
-	buf []sample // lookback buffer
-	i   int      // position of most recent element in ring buffer
-	f   int      // position of first element in ring buffer
-	l   int      // number of elements in buffer
-
-	it sampleRingIterator
+	// Lookback buffers. We use iBuf for mixed samples, but one of the three
+	// concrete ones for homogeneous samples. (Only one of the four bufs is
+	// allowed to be populated!) This avoids the overhead of the interface
+	// wrapper for the happy (and by far most common) case of homogeneous
+	// samples.
+	iBuf     []chunks.Sample
+	fBuf     []fSample
+	hBuf     []hSample
+	fhBuf    []fhSample
+	bufInUse bufType
+
+	i int // Position of most recent element in ring buffer.
+	f int // Position of first element in ring buffer.
+	l int // Number of elements in buffer.
+
+	it SampleRingIterator
 }
 
-func newSampleRing(delta int64, sz int) *sampleRing {
-	r := &sampleRing{delta: delta, buf: make([]sample, sz)}
-	r.reset()
+type bufType int
+
+const (
+	noBuf bufType = iota // Nothing yet stored in sampleRing.
+	iBuf
+	fBuf
+	hBuf
+	fhBuf
+)
 
+// newSampleRing creates a new sampleRing. If you do not know the preferred
+// value type yet, use a size of 0 (in which case the provided typ doesn't
+// matter). On the first add, a buffer of size 16 will be allocated with the
+// preferred type being the type of the first added sample.
+func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
+	r := &sampleRing{delta: delta}
+	r.reset()
+	if size <= 0 {
+		// Will initialize on first add.
+		return r
+	}
+	switch typ {
+	case chunkenc.ValFloat:
+		r.fBuf = make([]fSample, size)
+	case chunkenc.ValHistogram:
+		r.hBuf = make([]hSample, size)
+	case chunkenc.ValFloatHistogram:
+		r.fhBuf = make([]fhSample, size)
+	default:
+		// Do not initialize anything because the 1st sample will be
+		// added to one of the other bufs anyway.
+	}
 	return r
 }
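+
+// newSampleRingSketch shows (illustratively) the two construction modes: a
+// known value type pre-allocates the matching specialized buffer, while a
+// size of 0 defers allocation until the first sample arrives.
+func newSampleRingSketch() (known, lazy *sampleRing) {
+	known = newSampleRing(5*60*1000, 16, chunkenc.ValFloat) // 5m delta, float buffer
+	lazy = newSampleRing(5*60*1000, 0, chunkenc.ValNone)    // type decided on first add
+	return known, lazy
+}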
 
@@ -166,54 +309,259 @@ func (r *sampleRing) reset() {
 	r.l = 0
 	r.i = -1
 	r.f = 0
+	r.bufInUse = noBuf
+
+	// The first sample after the reset will always go to a specialized
+	// buffer. If we later need to change to the interface buffer, we'll
+	// copy from the specialized buffer to the interface buffer. For that to
+	// work properly, we have to reset the interface buffer here, too.
+	r.iBuf = r.iBuf[:0]
 }
 
-// Returns the current iterator. Invalidates previously returned iterators.
-func (r *sampleRing) iterator() chunkenc.Iterator {
-	r.it.r = r
-	r.it.i = -1
+// Resets and returns the iterator. Invalidates previously returned iterators.
+func (r *sampleRing) iterator() *SampleRingIterator {
+	r.it.reset(r)
 	return &r.it
 }
 
-type sampleRingIterator struct {
-	r *sampleRing
-	i int
+// SampleRingIterator is returned by BufferedSeriesIterator.Buffer() and can be
+// used to iterate samples buffered in the lookback window.
+type SampleRingIterator struct {
+	r  *sampleRing
+	i  int
+	t  int64
+	f  float64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
+}
+
+func (it *SampleRingIterator) reset(r *sampleRing) {
+	it.r = r
+	it.i = -1
+	it.h = nil
+	it.fh = nil
 }
 
-func (it *sampleRingIterator) Next() bool {
+func (it *SampleRingIterator) Next() chunkenc.ValueType {
 	it.i++
-	return it.i < it.r.l
+	if it.i >= it.r.l {
+		return chunkenc.ValNone
+	}
+	switch it.r.bufInUse {
+	case fBuf:
+		s := it.r.atF(it.i)
+		it.t = s.t
+		it.f = s.f
+		return chunkenc.ValFloat
+	case hBuf:
+		s := it.r.atH(it.i)
+		it.t = s.t
+		it.h = s.h
+		return chunkenc.ValHistogram
+	case fhBuf:
+		s := it.r.atFH(it.i)
+		it.t = s.t
+		it.fh = s.fh
+		return chunkenc.ValFloatHistogram
+	}
+	s := it.r.at(it.i)
+	it.t = s.T()
+	switch s.Type() {
+	case chunkenc.ValHistogram:
+		it.h = s.H()
+		it.fh = nil
+		return chunkenc.ValHistogram
+	case chunkenc.ValFloatHistogram:
+		it.fh = s.FH()
+		it.h = nil
+		return chunkenc.ValFloatHistogram
+	default:
+		it.f = s.F()
+		return chunkenc.ValFloat
+	}
+}
+
+// At returns the current float element of the iterator.
+func (it *SampleRingIterator) At() (int64, float64) {
+	return it.t, it.f
 }
 
-func (it *sampleRingIterator) Seek(int64) bool {
-	return false
+// AtHistogram returns the current histogram element of the iterator.
+func (it *SampleRingIterator) AtHistogram() (int64, *histogram.Histogram) {
+	return it.t, it.h
 }
 
-func (it *sampleRingIterator) Err() error {
-	return nil
+// AtFloatHistogram returns the current histogram element of the iterator. If the
+// current sample is an integer histogram, it will be converted to a float histogram.
+// An optional histogram.FloatHistogram can be provided to avoid allocating a new
+// object for the conversion.
+func (it *SampleRingIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	if it.fh == nil {
+		return it.t, it.h.ToFloat(fh)
+	}
+	if fh != nil {
+		it.fh.CopyTo(fh)
+		return it.t, fh
+	}
+	return it.t, it.fh.Copy()
 }
 
-func (it *sampleRingIterator) At() (int64, float64) {
-	return it.r.at(it.i)
+func (it *SampleRingIterator) AtT() int64 {
+	return it.t
 }
 
-func (r *sampleRing) at(i int) (int64, float64) {
-	j := (r.f + i) % len(r.buf)
-	s := r.buf[j]
-	return s.t, s.v
+func (r *sampleRing) at(i int) chunks.Sample {
+	j := (r.f + i) % len(r.iBuf)
+	return r.iBuf[j]
 }
 
-// add adds a sample to the ring buffer and frees all samples that fall
-// out of the delta range.
-func (r *sampleRing) add(t int64, v float64) {
-	l := len(r.buf)
+func (r *sampleRing) atF(i int) fSample {
+	j := (r.f + i) % len(r.fBuf)
+	return r.fBuf[j]
+}
+
+func (r *sampleRing) atH(i int) hSample {
+	j := (r.f + i) % len(r.hBuf)
+	return r.hBuf[j]
+}
+
+func (r *sampleRing) atFH(i int) fhSample {
+	j := (r.f + i) % len(r.fhBuf)
+	return r.fhBuf[j]
+}
+
+// add adds a sample to the ring buffer and frees all samples that fall out of
+// the delta range. Note that this method works for any sample
+// implementation. If you know you are dealing with one of the implementations
+// from this package (fSample, hSample, fhSample), call one of the specialized
+// methods addF, addH, or addFH for better performance.
+func (r *sampleRing) add(s chunks.Sample) {
+	if r.bufInUse == noBuf {
+		// First sample.
+		switch s := s.(type) {
+		case fSample:
+			r.bufInUse = fBuf
+			r.fBuf = addF(s, r.fBuf, r)
+		case hSample:
+			r.bufInUse = hBuf
+			r.hBuf = addH(s, r.hBuf, r)
+		case fhSample:
+			r.bufInUse = fhBuf
+			r.fhBuf = addFH(s, r.fhBuf, r)
+		}
+		return
+	}
+	if r.bufInUse != iBuf {
+		// Nothing added to the interface buf yet. Let's check if we can
+		// stay specialized.
+		switch s := s.(type) {
+		case fSample:
+			if r.bufInUse == fBuf {
+				r.fBuf = addF(s, r.fBuf, r)
+				return
+			}
+		case hSample:
+			if r.bufInUse == hBuf {
+				r.hBuf = addH(s, r.hBuf, r)
+				return
+			}
+		case fhSample:
+			if r.bufInUse == fhBuf {
+				r.fhBuf = addFH(s, r.fhBuf, r)
+				return
+			}
+		}
+		// The new sample isn't a fit for the already existing
+		// ones. Copy the latter into the interface buffer where needed.
+		// The interface buffer is assumed to be of length zero at this point.
+		switch r.bufInUse {
+		case fBuf:
+			for _, s := range r.fBuf {
+				r.iBuf = append(r.iBuf, s)
+			}
+			r.fBuf = nil
+		case hBuf:
+			for _, s := range r.hBuf {
+				r.iBuf = append(r.iBuf, s)
+			}
+			r.hBuf = nil
+		case fhBuf:
+			for _, s := range r.fhBuf {
+				r.iBuf = append(r.iBuf, s)
+			}
+			r.fhBuf = nil
+		}
+		r.bufInUse = iBuf
+	}
+	r.iBuf = addSample(s, r.iBuf, r)
+}
+
+// addF is a version of the add method specialized for fSample.
+func (r *sampleRing) addF(s fSample) {
+	switch r.bufInUse {
+	case fBuf: // Add to existing fSamples.
+		r.fBuf = addF(s, r.fBuf, r)
+	case noBuf: // Add first sample.
+		r.fBuf = addF(s, r.fBuf, r)
+		r.bufInUse = fBuf
+	case iBuf: // Already have interface samples. Add to the interface buf.
+		r.iBuf = addSample(s, r.iBuf, r)
+	default:
+		// Already have specialized samples that are not fSamples.
+		// Need to call the checked add method for conversion.
+		r.add(s)
+	}
+}
+
+// addH is a version of the add method specialized for hSample.
+func (r *sampleRing) addH(s hSample) {
+	switch r.bufInUse {
+	case hBuf: // Add to existing hSamples.
+		r.hBuf = addH(s, r.hBuf, r)
+	case noBuf: // Add first sample.
+		r.hBuf = addH(s, r.hBuf, r)
+		r.bufInUse = hBuf
+	case iBuf: // Already have interface samples. Add to the interface buf.
+		r.iBuf = addSample(s, r.iBuf, r)
+	default:
+		// Already have specialized samples that are not hSamples.
+		// Need to call the checked add method for conversion.
+		r.add(s)
+	}
+}
+
+// addFH is a version of the add method specialized for fhSample.
+func (r *sampleRing) addFH(s fhSample) {
+	switch r.bufInUse {
+	case fhBuf: // Add to existing fhSamples.
+		r.fhBuf = addFH(s, r.fhBuf, r)
+	case noBuf: // Add first sample.
+		r.fhBuf = addFH(s, r.fhBuf, r)
+		r.bufInUse = fhBuf
+	case iBuf: // Already have interface samples. Add to the interface buf.
+		r.iBuf = addSample(s, r.iBuf, r)
+	default:
+		// Already have specialized samples that are not fhSamples.
+		// Need to call the checked add method for conversion.
+		r.add(s)
+	}
+}
+
+// addSample adds a sample to a buffer of chunks.Sample, i.e. the general case
+// using an interface as the type.
+func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample {
+	l := len(buf)
 	// Grow the ring buffer if it fits no more elements.
+	if l == 0 {
+		buf = make([]chunks.Sample, 16)
+		l = 16
+	}
 	if l == r.l {
-		buf := make([]sample, 2*l)
-		copy(buf[l+r.f:], r.buf[r.f:])
-		copy(buf, r.buf[:r.f])
+		newBuf := make([]chunks.Sample, 2*l)
+		copy(newBuf[l+r.f:], buf[r.f:])
+		copy(newBuf, buf[:r.f])
 
-		r.buf = buf
+		buf = newBuf
 		r.i = r.f
 		r.f += l
 		l = 2 * l
@@ -224,18 +572,146 @@ func (r *sampleRing) add(t int64, v float64) {
 		}
 	}
 
-	r.buf[r.i] = sample{t: t, v: v}
+	buf[r.i] = s.Copy()
 	r.l++
 
 	// Free head of the buffer of samples that just fell out of the range.
-	tmin := t - r.delta
-	for r.buf[r.f].t < tmin {
+	tmin := s.T() - r.delta
+	for buf[r.f].T() < tmin {
 		r.f++
 		if r.f >= l {
 			r.f -= l
 		}
 		r.l--
 	}
+	return buf
+}
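+
+// evictionSketch illustrates (hypothetical timestamps) the rule shared by
+// addSample/addF/addH/addFH: after adding a sample at time t, anything older
+// than t-delta is dropped from the head of the ring.
+func evictionSketch() []chunks.Sample {
+	r := newSampleRing(10, 0, chunkenc.ValNone)
+	r.addF(fSample{t: 0, f: 1})
+	r.addF(fSample{t: 5, f: 2})
+	r.addF(fSample{t: 20, f: 3}) // evicts t=0 and t=5 (both older than 20-10)
+	return r.samples()           // a single sample at t=20 remains
+}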
+
+// addF adds an fSample to a (specialized) fSample buffer.
+func addF(s fSample, buf []fSample, r *sampleRing) []fSample {
+	l := len(buf)
+	// Grow the ring buffer if it fits no more elements.
+	if l == 0 {
+		buf = make([]fSample, 16)
+		l = 16
+	}
+	if l == r.l {
+		newBuf := make([]fSample, 2*l)
+		copy(newBuf[l+r.f:], buf[r.f:])
+		copy(newBuf, buf[:r.f])
+
+		buf = newBuf
+		r.i = r.f
+		r.f += l
+		l = 2 * l
+	} else {
+		r.i++
+		if r.i >= l {
+			r.i -= l
+		}
+	}
+
+	buf[r.i] = s
+	r.l++
+
+	// Free head of the buffer of samples that just fell out of the range.
+	tmin := s.T() - r.delta
+	for buf[r.f].T() < tmin {
+		r.f++
+		if r.f >= l {
+			r.f -= l
+		}
+		r.l--
+	}
+	return buf
+}
+
+// addH adds an hSample to a (specialized) hSample buffer.
+func addH(s hSample, buf []hSample, r *sampleRing) []hSample {
+	l := len(buf)
+	// Grow the ring buffer if it fits no more elements.
+	if l == 0 {
+		buf = make([]hSample, 16)
+		l = 16
+	}
+	if l == r.l {
+		newBuf := make([]hSample, 2*l)
+		copy(newBuf[l+r.f:], buf[r.f:])
+		copy(newBuf, buf[:r.f])
+
+		buf = newBuf
+		r.i = r.f
+		r.f += l
+		l = 2 * l
+	} else {
+		r.i++
+		if r.i >= l {
+			r.i -= l
+		}
+	}
+
+	buf[r.i].t = s.t
+	if buf[r.i].h == nil {
+		buf[r.i].h = s.h.Copy()
+	} else {
+		s.h.CopyTo(buf[r.i].h)
+	}
+	r.l++
+
+	// Free head of the buffer of samples that just fell out of the range.
+	tmin := s.T() - r.delta
+	for buf[r.f].T() < tmin {
+		r.f++
+		if r.f >= l {
+			r.f -= l
+		}
+		r.l--
+	}
+	return buf
+}
+
+// addFH adds an fhSample to a (specialized) fhSample buffer.
+func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample {
+	l := len(buf)
+	// Grow the ring buffer if it fits no more elements.
+	if l == 0 {
+		buf = make([]fhSample, 16)
+		l = 16
+	}
+	if l == r.l {
+		newBuf := make([]fhSample, 2*l)
+		copy(newBuf[l+r.f:], buf[r.f:])
+		copy(newBuf, buf[:r.f])
+
+		buf = newBuf
+		r.i = r.f
+		r.f += l
+		l = 2 * l
+	} else {
+		r.i++
+		if r.i >= l {
+			r.i -= l
+		}
+	}
+
+	buf[r.i].t = s.t
+	if buf[r.i].fh == nil {
+		buf[r.i].fh = s.fh.Copy()
+	} else {
+		s.fh.CopyTo(buf[r.i].fh)
+	}
+	r.l++
+
+	// Free head of the buffer of samples that just fell out of the range.
+	tmin := s.T() - r.delta
+	for buf[r.f].T() < tmin {
+		r.f++
+		if r.f >= l {
+			r.f -= l
+		}
+		r.l--
+	}
+	return buf
 }
 
 // reduceDelta lowers the buffered time delta, dropping any samples that are
@@ -250,40 +726,98 @@ func (r *sampleRing) reduceDelta(delta int64) bool {
 		return true
 	}
 
+	switch r.bufInUse {
+	case fBuf:
+		genericReduceDelta(r.fBuf, r)
+	case hBuf:
+		genericReduceDelta(r.hBuf, r)
+	case fhBuf:
+		genericReduceDelta(r.fhBuf, r)
+	default:
+		genericReduceDelta(r.iBuf, r)
+	}
+	return true
+}
+
+func genericReduceDelta[T chunks.Sample](buf []T, r *sampleRing) {
 	// Free head of the buffer of samples that just fell out of the range.
-	l := len(r.buf)
-	tmin := r.buf[r.i].t - delta
-	for r.buf[r.f].t < tmin {
+	l := len(buf)
+	tmin := buf[r.i].T() - r.delta
+	for buf[r.f].T() < tmin {
 		r.f++
 		if r.f >= l {
 			r.f -= l
 		}
 		r.l--
 	}
-	return true
 }
 
 // nthLast returns the nth most recent element added to the ring.
-func (r *sampleRing) nthLast(n int) (int64, float64, bool) {
+func (r *sampleRing) nthLast(n int) (chunks.Sample, bool) {
 	if n > r.l {
-		return 0, 0, false
+		return fSample{}, false
+	}
+	i := r.l - n
+	switch r.bufInUse {
+	case fBuf:
+		return r.atF(i), true
+	case hBuf:
+		return r.atH(i), true
+	case fhBuf:
+		return r.atFH(i), true
+	default:
+		return r.at(i), true
 	}
-	t, v := r.at(r.l - n)
-	return t, v, true
 }
 
-func (r *sampleRing) samples() []sample {
-	res := make([]sample, r.l)
+func (r *sampleRing) samples() []chunks.Sample {
+	res := make([]chunks.Sample, r.l)
 
-	var k = r.f + r.l
+	k := r.f + r.l
 	var j int
-	if k > len(r.buf) {
-		k = len(r.buf)
-		j = r.l - k + r.f
-	}
 
-	n := copy(res, r.buf[r.f:k])
-	copy(res[n:], r.buf[:j])
+	switch r.bufInUse {
+	case iBuf:
+		if k > len(r.iBuf) {
+			k = len(r.iBuf)
+			j = r.l - k + r.f
+		}
+		n := copy(res, r.iBuf[r.f:k])
+		copy(res[n:], r.iBuf[:j])
+	case fBuf:
+		if k > len(r.fBuf) {
+			k = len(r.fBuf)
+			j = r.l - k + r.f
+		}
+		resF := make([]fSample, r.l)
+		n := copy(resF, r.fBuf[r.f:k])
+		copy(resF[n:], r.fBuf[:j])
+		for i, s := range resF {
+			res[i] = s
+		}
+	case hBuf:
+		if k > len(r.hBuf) {
+			k = len(r.hBuf)
+			j = r.l - k + r.f
+		}
+		resH := make([]hSample, r.l)
+		n := copy(resH, r.hBuf[r.f:k])
+		copy(resH[n:], r.hBuf[:j])
+		for i, s := range resH {
+			res[i] = s
+		}
+	case fhBuf:
+		if k > len(r.fhBuf) {
+			k = len(r.fhBuf)
+			j = r.l - k + r.f
+		}
+		resFH := make([]fhSample, r.l)
+		n := copy(resFH, r.fhBuf[r.f:k])
+		copy(resFH[n:], r.fhBuf[:j])
+		for i, s := range resFH {
+			res[i] = s
+		}
+	}
 
 	return res
 }
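
The addH/addFH helpers above (and the float variant before them) all follow the same ring-buffer strategy: when the buffer is full, allocate one of double the size, rebase the wrapped contents with two copies so the ring stays contiguous, write the new sample, then evict from the head everything older than the newest timestamp minus delta. A minimal, self-contained sketch of that strategy follows; the names are hypothetical and this is not the vendored sampleRing, just the same grow-and-evict idea.

package main

import "fmt"

type sample struct {
	t int64
	v float64
}

type ring struct {
	buf   []sample
	f     int   // index of the oldest element
	i     int   // index of the newest element; starts at -1
	l     int   // number of live elements
	delta int64 // width of the retained time window
}

func (r *ring) add(s sample) {
	n := len(r.buf)
	if n == 0 {
		r.buf = make([]sample, 16)
		n = 16
	}
	if n == r.l {
		// Full: double the buffer and rebase both halves so the
		// logical order (oldest..newest) stays intact across the wrap.
		newBuf := make([]sample, 2*n)
		copy(newBuf[n+r.f:], r.buf[r.f:])
		copy(newBuf, r.buf[:r.f])
		r.buf = newBuf
		r.i = r.f
		r.f += n
		n *= 2
	} else {
		r.i++
		if r.i >= n {
			r.i -= n
		}
	}
	r.buf[r.i] = s
	r.l++
	// Evict samples from the head that fell out of [s.t-delta, s.t].
	tmin := s.t - r.delta
	for r.buf[r.f].t < tmin {
		r.f++
		if r.f >= n {
			r.f -= n
		}
		r.l--
	}
}

func main() {
	r := &ring{i: -1, delta: 5}
	for t := int64(0); t < 20; t++ {
		r.add(sample{t: t, v: float64(t)})
	}
	fmt.Println("retained:", r.l) // 6: only samples with t in [14,19] survive
}
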
diff --git a/vendor/github.com/prometheus/prometheus/storage/errors.go b/vendor/github.com/prometheus/prometheus/storage/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd48066db600ff72dabf7c7c5e388596c34b89d7
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/storage/errors.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import "fmt"
+
+type errDuplicateSampleForTimestamp struct {
+	timestamp           int64
+	existing            float64
+	existingIsHistogram bool
+	newValue            float64
+}
+
+func NewDuplicateFloatErr(t int64, existing, newValue float64) error {
+	return errDuplicateSampleForTimestamp{
+		timestamp: t,
+		existing:  existing,
+		newValue:  newValue,
+	}
+}
+
+// NewDuplicateHistogramToFloatErr describes an error where a new float sample is sent for the same timestamp as a previous histogram.
+func NewDuplicateHistogramToFloatErr(t int64, newValue float64) error {
+	return errDuplicateSampleForTimestamp{
+		timestamp:           t,
+		existingIsHistogram: true,
+		newValue:            newValue,
+	}
+}
+
+func (e errDuplicateSampleForTimestamp) Error() string {
+	if e.timestamp == 0 {
+		return "duplicate sample for timestamp"
+	}
+	if e.existingIsHistogram {
+		return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing is a histogram, new value %g", e.timestamp, e.newValue)
+	}
+	return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue)
+}
+
+// Is implements the anonymous interface checked by errors.Is.
+// Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp.
+func (e errDuplicateSampleForTimestamp) Is(t error) bool {
+	if t == ErrDuplicateSampleForTimestamp {
+		return true
+	}
+	if v, ok := t.(errDuplicateSampleForTimestamp); ok {
+		return e == v
+	}
+	return false
+}
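
The value-carrying errDuplicateSampleForTimestamp above keeps backwards compatibility through its custom Is method: any instance, however it is wrapped, still matches the exported ErrDuplicateSampleForTimestamp sentinel. A short usage sketch, assuming only the constructors in this file (the extra wrapping is illustrative):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

func main() {
	// A duplicate error that carries the colliding values, wrapped once
	// more as an appender might do.
	err := fmt.Errorf("append failed: %w",
		storage.NewDuplicateFloatErr(1000, 1.0, 2.0))

	// errors.Is walks the chain and hits the custom Is method, so the
	// sentinel comparison still succeeds despite the extra context.
	if errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
		fmt.Println("duplicate sample:", err)
	}
}
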
diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go
index 82e65e91b383d64c16ee9beeb9fbf09aae7f0052..4d076788a7cca8657bfc14cb1956a568ca5e5d82 100644
--- a/vendor/github.com/prometheus/prometheus/storage/fanout.go
+++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go
@@ -15,17 +15,19 @@ package storage
 
 import (
 	"context"
+	"log/slog"
 
-	"github.com/go-kit/kit/log"
-	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
 )
 
 type fanout struct {
-	logger log.Logger
+	logger *slog.Logger
 
 	primary     Storage
 	secondaries []Storage
@@ -40,7 +42,7 @@ type fanout struct {
 // and the error from the secondary querier will be returned as a warning.
 //
 // NOTE: In the case of Prometheus, it treats all remote storages as secondary / best effort.
-func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage {
+func NewFanout(logger *slog.Logger, primary Storage, secondaries ...Storage) Storage {
 	return &fanout{
 		logger:      logger,
 		primary:     primary,
@@ -69,42 +71,42 @@ func (f *fanout) StartTime() (int64, error) {
 	return firstTime, nil
 }
 
-func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
-	primary, err := f.primary.Querier(ctx, mint, maxt)
+func (f *fanout) Querier(mint, maxt int64) (Querier, error) {
+	primary, err := f.primary.Querier(mint, maxt)
 	if err != nil {
 		return nil, err
 	}
 
 	secondaries := make([]Querier, 0, len(f.secondaries))
 	for _, storage := range f.secondaries {
-		querier, err := storage.Querier(ctx, mint, maxt)
+		querier, err := storage.Querier(mint, maxt)
 		if err != nil {
 			// Close already open Queriers, append potential errors to returned error.
-			errs := tsdb_errors.MultiError{err}
-			errs.Add(primary.Close())
+			errs := tsdb_errors.NewMulti(err, primary.Close())
 			for _, q := range secondaries {
 				errs.Add(q.Close())
 			}
 			return nil, errs.Err()
 		}
-		secondaries = append(secondaries, querier)
+		if _, ok := querier.(noopQuerier); !ok {
+			secondaries = append(secondaries, querier)
+		}
 	}
 	return NewMergeQuerier([]Querier{primary}, secondaries, ChainedSeriesMerge), nil
 }
 
-func (f *fanout) ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error) {
-	primary, err := f.primary.ChunkQuerier(ctx, mint, maxt)
+func (f *fanout) ChunkQuerier(mint, maxt int64) (ChunkQuerier, error) {
+	primary, err := f.primary.ChunkQuerier(mint, maxt)
 	if err != nil {
 		return nil, err
 	}
 
 	secondaries := make([]ChunkQuerier, 0, len(f.secondaries))
 	for _, storage := range f.secondaries {
-		querier, err := storage.ChunkQuerier(ctx, mint, maxt)
+		querier, err := storage.ChunkQuerier(mint, maxt)
 		if err != nil {
 			// Close already open Queriers, append potential errors to returned error.
-			errs := tsdb_errors.MultiError{err}
-			errs.Add(primary.Close())
+			errs := tsdb_errors.NewMulti(err, primary.Close())
 			for _, q := range secondaries {
 				errs.Add(q.Close())
 			}
@@ -130,8 +132,7 @@ func (f *fanout) Appender(ctx context.Context) Appender {
 
 // Close closes the storage and all its underlying resources.
 func (f *fanout) Close() error {
-	errs := tsdb_errors.MultiError{}
-	errs.Add(f.primary.Close())
+	errs := tsdb_errors.NewMulti(f.primary.Close())
 	for _, s := range f.secondaries {
 		errs.Add(s.Close())
 	}
@@ -140,37 +141,104 @@ func (f *fanout) Close() error {
 
 // fanoutAppender implements Appender.
 type fanoutAppender struct {
-	logger log.Logger
+	logger *slog.Logger
 
 	primary     Appender
 	secondaries []Appender
 }
 
-func (f *fanoutAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
-	ref, err := f.primary.Add(l, t, v)
+// SetOptions propagates the options to both the primary and secondary appenders.
+func (f *fanoutAppender) SetOptions(opts *AppendOptions) {
+	if f.primary != nil {
+		f.primary.SetOptions(opts)
+	}
+	for _, appender := range f.secondaries {
+		appender.SetOptions(opts)
+	}
+}
+
+func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) {
+	ref, err := f.primary.Append(ref, l, t, v)
 	if err != nil {
 		return ref, err
 	}
 
 	for _, appender := range f.secondaries {
-		if _, err := appender.Add(l, t, v); err != nil {
+		if _, err := appender.Append(ref, l, t, v); err != nil {
 			return 0, err
 		}
 	}
 	return ref, nil
 }
 
-func (f *fanoutAppender) AddFast(ref uint64, t int64, v float64) error {
-	if err := f.primary.AddFast(ref, t, v); err != nil {
-		return err
+func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) {
+	ref, err := f.primary.AppendExemplar(ref, l, e)
+	if err != nil {
+		return ref, err
 	}
 
 	for _, appender := range f.secondaries {
-		if err := appender.AddFast(ref, t, v); err != nil {
-			return err
+		if _, err := appender.AppendExemplar(ref, l, e); err != nil {
+			return 0, err
 		}
 	}
-	return nil
+	return ref, nil
+}
+
+func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
+	ref, err := f.primary.AppendHistogram(ref, l, t, h, fh)
+	if err != nil {
+		return ref, err
+	}
+
+	for _, appender := range f.secondaries {
+		if _, err := appender.AppendHistogram(ref, l, t, h, fh); err != nil {
+			return 0, err
+		}
+	}
+	return ref, nil
+}
+
+func (f *fanoutAppender) AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
+	ref, err := f.primary.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh)
+	if err != nil {
+		return ref, err
+	}
+
+	for _, appender := range f.secondaries {
+		if _, err := appender.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh); err != nil {
+			return 0, err
+		}
+	}
+	return ref, nil
+}
+
+func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) {
+	ref, err := f.primary.UpdateMetadata(ref, l, m)
+	if err != nil {
+		return ref, err
+	}
+
+	for _, appender := range f.secondaries {
+		if _, err := appender.UpdateMetadata(ref, l, m); err != nil {
+			return 0, err
+		}
+	}
+	return ref, nil
+}
+
+func (f *fanoutAppender) AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error) {
+	ref, err := f.primary.AppendCTZeroSample(ref, l, t, ct)
+	if err != nil {
+		return ref, err
+	}
+
+	for _, appender := range f.secondaries {
+		if _, err := appender.AppendCTZeroSample(ref, l, t, ct); err != nil {
+			return 0, err
+		}
+	}
+	return ref, nil
 }
 
 func (f *fanoutAppender) Commit() (err error) {
@@ -181,7 +249,7 @@ func (f *fanoutAppender) Commit() (err error) {
 			err = appender.Commit()
 		} else {
 			if rollbackErr := appender.Rollback(); rollbackErr != nil {
-				level.Error(f.logger).Log("msg", "Squashed rollback error on commit", "err", rollbackErr)
+				f.logger.Error("Squashed rollback error on commit", "err", rollbackErr)
 			}
 		}
 	}
@@ -193,10 +261,11 @@ func (f *fanoutAppender) Rollback() (err error) {
 
 	for _, appender := range f.secondaries {
 		rollbackErr := appender.Rollback()
-		if err == nil {
+		switch {
+		case err == nil:
 			err = rollbackErr
-		} else if rollbackErr != nil {
-			level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr)
+		case rollbackErr != nil:
+			f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr)
 		}
 	}
 	return nil
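
The reworked fanout takes a *slog.Logger and fans every append type (floats, exemplars, histograms, CT zero samples, metadata) out to the secondaries, reusing the series reference returned by the primary. A minimal wiring sketch under the assumption that primary and secondary are existing storage.Storage implementations (for example a local TSDB and a remote store); they are placeholders, not part of this diff:

package fanoutdemo

import (
	"context"
	"log/slog"
	"os"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendSample writes one float sample through a fanout of the two storages.
func appendSample(primary, secondary storage.Storage) error {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	fan := storage.NewFanout(logger, primary, secondary)

	app := fan.Appender(context.Background())
	// The primary is appended to first; the secondaries then receive the
	// same sample with the primary's series reference.
	if _, err := app.Append(0, labels.FromStrings("__name__", "demo_metric"), 1000, 42.0); err != nil {
		app.Rollback()
		return err
	}
	return app.Commit()
}
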
diff --git a/vendor/github.com/prometheus/prometheus/storage/generic.go b/vendor/github.com/prometheus/prometheus/storage/generic.go
index 817122bc12bb165b7df7530f91e2ab83a2a429ec..e5f4b4d03abf27f871062329ef5ed85f534ae3ed 100644
--- a/vendor/github.com/prometheus/prometheus/storage/generic.go
+++ b/vendor/github.com/prometheus/prometheus/storage/generic.go
@@ -17,19 +17,22 @@
 package storage
 
 import (
-	"github.com/prometheus/prometheus/pkg/labels"
+	"context"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/util/annotations"
 )
 
 type genericQuerier interface {
 	LabelQuerier
-	Select(bool, *SelectHints, ...*labels.Matcher) genericSeriesSet
+	Select(context.Context, bool, *SelectHints, ...*labels.Matcher) genericSeriesSet
 }
 
 type genericSeriesSet interface {
 	Next() bool
 	At() Labels
 	Err() error
-	Warnings() Warnings
+	Warnings() annotations.Annotations
 }
 
 type genericSeriesMergeFunc func(...Labels) Labels
@@ -58,11 +61,11 @@ type genericQuerierAdapter struct {
 	cq ChunkQuerier
 }
 
-func (q *genericQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
+func (q *genericQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
 	if q.q != nil {
-		return &genericSeriesSetAdapter{q.q.Select(sortSeries, hints, matchers...)}
+		return &genericSeriesSetAdapter{q.q.Select(ctx, sortSeries, hints, matchers...)}
 	}
-	return &genericChunkSeriesSetAdapter{q.cq.Select(sortSeries, hints, matchers...)}
+	return &genericChunkSeriesSetAdapter{q.cq.Select(ctx, sortSeries, hints, matchers...)}
 }
 
 func newGenericQuerierFrom(q Querier) genericQuerier {
@@ -85,8 +88,8 @@ func (a *seriesSetAdapter) At() Series {
 	return a.genericSeriesSet.At().(Series)
 }
 
-func (q *querierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
-	return &seriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)}
+func (q *querierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
+	return &seriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)}
 }
 
 type chunkQuerierAdapter struct {
@@ -101,8 +104,8 @@ func (a *chunkSeriesSetAdapter) At() ChunkSeries {
 	return a.genericSeriesSet.At().(ChunkSeries)
 }
 
-func (q *chunkQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet {
-	return &chunkSeriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)}
+func (q *chunkQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet {
+	return &chunkSeriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)}
 }
 
 type seriesMergerAdapter struct {
@@ -137,4 +140,4 @@ func (noopGenericSeriesSet) At() Labels { return nil }
 
 func (noopGenericSeriesSet) Err() error { return nil }
 
-func (noopGenericSeriesSet) Warnings() Warnings { return nil }
+func (noopGenericSeriesSet) Warnings() annotations.Annotations { return nil }
diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go
index a10178aeed6917efdda69ebb12c3d15f73ee359a..32b90cc10aade3e81395da51b9bfbd996ad478db 100644
--- a/vendor/github.com/prometheus/prometheus/storage/interface.go
+++ b/vendor/github.com/prometheus/prometheus/storage/interface.go
@@ -16,20 +16,49 @@ package storage
 import (
 	"context"
 	"errors"
+	"fmt"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/util/annotations"
 )
 
 // The errors exposed.
 var (
-	ErrNotFound                    = errors.New("not found")
-	ErrOutOfOrderSample            = errors.New("out of order sample")
-	ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp")
-	ErrOutOfBounds                 = errors.New("out of bounds")
+	ErrNotFound = errors.New("not found")
+	// ErrOutOfOrderSample is returned when out-of-order support is disabled and the sample is out of order.
+	ErrOutOfOrderSample = errors.New("out of order sample")
+	// ErrOutOfBounds is returned when out-of-order support is disabled and the sample is older than the min valid time for the append.
+	ErrOutOfBounds = errors.New("out of bounds")
+	// ErrTooOldSample is returned when out-of-order support is enabled but the sample is outside the allowed time window.
+	ErrTooOldSample = errors.New("too old sample")
+	// ErrDuplicateSampleForTimestamp is returned when the sample has the same timestamp as an existing sample but a different value.
+	ErrDuplicateSampleForTimestamp = errDuplicateSampleForTimestamp{}
+	ErrOutOfOrderExemplar          = errors.New("out of order exemplar")
+	ErrDuplicateExemplar           = errors.New("duplicate exemplar")
+	ErrExemplarLabelLength         = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
+	ErrExemplarsDisabled           = errors.New("exemplar storage is disabled or max exemplars is less than or equal to 0")
+	ErrNativeHistogramsDisabled    = errors.New("native histograms are disabled")
+	ErrOOONativeHistogramsDisabled = errors.New("out-of-order native histogram ingestion is disabled")
+
+	// ErrOutOfOrderCT indicates a failed append of a CT to the storage
+	// due to the CT being older than the newer sample.
+	// NOTE(bwplotka): This can be either an instrumentation failure or commonly expected
+	// behaviour, and we currently don't have a way to determine which. As a result
+	// it's recommended to ignore this error for now.
+	ErrOutOfOrderCT      = errors.New("created timestamp out of order, ignoring")
+	ErrCTNewerThanSample = errors.New("CT is newer or the same as sample's timestamp, ignoring")
 )
 
+// SeriesRef is a generic series reference. In prometheus it is either a
+// HeadSeriesRef or BlockSeriesRef, though other implementations may have
+// their own reference types.
+type SeriesRef uint64
+
 // Appendable allows creating appenders.
 type Appendable interface {
 	// Appender returns a new appender for the storage. The implementation
@@ -45,7 +74,7 @@ type SampleAndChunkQueryable interface {
 }
 
 // Storage ingests and manages samples, along with various indexes. All methods
-// are goroutine-safe. Storage implements storage.SampleAppender.
+// are goroutine-safe. Storage implements storage.Appender.
 type Storage interface {
 	SampleAndChunkQueryable
 	Appendable
@@ -57,11 +86,27 @@ type Storage interface {
 	Close() error
 }
 
+// ExemplarStorage ingests and manages exemplars, along with various indexes. All methods are
+// goroutine-safe. ExemplarStorage implements storage.ExemplarAppender and storage.ExemplarQuerier.
+type ExemplarStorage interface {
+	ExemplarQueryable
+	ExemplarAppender
+}
+
 // A Queryable handles queries against a storage.
 // Use it when you need to have access to all samples without chunk encoding abstraction e.g promQL.
 type Queryable interface {
 	// Querier returns a new Querier on the storage.
-	Querier(ctx context.Context, mint, maxt int64) (Querier, error)
+	Querier(mint, maxt int64) (Querier, error)
+}
+
+// A MockQueryable is used for testing purposes so that a mock Querier can be used.
+type MockQueryable struct {
+	MockQuerier Querier
+}
+
+func (q *MockQueryable) Querier(int64, int64) (Querier, error) {
+	return q.MockQuerier, nil
 }
 
 // Querier provides querying access over time series data of a fixed time range.
@@ -69,16 +114,39 @@ type Querier interface {
 	LabelQuerier
 
 	// Select returns a set of series that matches the given label matchers.
+	// Results are not checked whether they match. Results that do not match
+	// may cause undefined behavior.
 	// Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance.
 	// It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all.
-	Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
+	Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
+}
+
+// MockQuerier is used for test purposes to mock the selected series that is returned.
+type MockQuerier struct {
+	SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
+}
+
+func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return nil, nil, nil
+}
+
+func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return nil, nil, nil
+}
+
+func (q *MockQuerier) Close() error {
+	return nil
+}
+
+func (q *MockQuerier) Select(_ context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
+	return q.SelectMockFunction(sortSeries, hints, matchers...)
 }
 
 // A ChunkQueryable handles queries against a storage.
 // Use it when you need to have access to samples in encoded format.
 type ChunkQueryable interface {
 	// ChunkQuerier returns a new ChunkQuerier on the storage.
-	ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error)
+	ChunkQuerier(mint, maxt int64) (ChunkQuerier, error)
 }
 
 // ChunkQuerier provides querying access over time series data of a fixed time range.
@@ -86,65 +154,116 @@ type ChunkQuerier interface {
 	LabelQuerier
 
 	// Select returns a set of series that matches the given label matchers.
+	// Results are not checked whether they match. Results that do not match
+	// may cause undefined behavior.
 	// Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance.
 	// It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all.
-	Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet
+	Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet
 }
 
 // LabelQuerier provides querying access over labels.
 type LabelQuerier interface {
-	// LabelValues returns all potential values for a label name.
-	// It is not safe to use the strings beyond the lifefime of the querier.
-	LabelValues(name string) ([]string, Warnings, error)
+	// LabelValues returns all potential values for a label name in sorted order.
+	// It is not safe to use the strings beyond the lifetime of the querier.
+	// If matchers are specified the returned result set is reduced
+	// to label values of metrics matching the matchers.
+	LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
 
 	// LabelNames returns all the unique label names present in the block in sorted order.
-	LabelNames() ([]string, Warnings, error)
+	// If matchers are specified the returned result set is reduced
+	// to label names of metrics matching the matchers.
+	LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
 
 	// Close releases the resources of the Querier.
 	Close() error
 }
 
+type ExemplarQueryable interface {
+	// ExemplarQuerier returns a new ExemplarQuerier on the storage.
+	ExemplarQuerier(ctx context.Context) (ExemplarQuerier, error)
+}
+
+// ExemplarQuerier provides reading access to time series data.
+type ExemplarQuerier interface {
+	// Select all the exemplars that match the matchers.
+	// Within a single slice of matchers, it is an intersection. Between the slices, it is a union.
+	Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error)
+}
+
 // SelectHints specifies hints passed for data selections.
 // This is used only as an option for implementation to use.
 type SelectHints struct {
 	Start int64 // Start time in milliseconds for this select.
 	End   int64 // End time in milliseconds for this select.
 
+	// Maximum number of results returned. Use a value of 0 to disable.
+	Limit int
+
 	Step int64  // Query step size in milliseconds.
 	Func string // String representation of surrounding function or aggregation.
 
 	Grouping []string // List of label names used in aggregation.
 	By       bool     // Indicate whether it is without or by.
 	Range    int64    // Range vector selector range in milliseconds.
+
+	// ShardCount is the total number of shards that series should be split into
+	// at query time. Then, only series in the ShardIndex shard will be returned
+	// by the query.
+	//
+	// ShardCount equal to 0 means that sharding is disabled.
+	ShardCount uint64
+
+	// ShardIndex is the series shard index to query. The index must be between 0 and ShardCount-1.
+	// When ShardCount is set to a value > 0, then a query will only process series within the
+	// ShardIndex's shard.
+	//
+	// Series are sharded by "labels stable hash" mod "ShardCount".
+	ShardIndex uint64
+
+	// DisableTrimming allows disabling the trimming of matching series chunks based on the query Start and End time.
+	// When trimming is disabled, the result may contain samples outside the queried time range, but Select()
+	// performance may improve.
+	DisableTrimming bool
+}
+
+// LabelHints specifies hints passed for label reads.
+// This is used only as an option for implementation to use.
+type LabelHints struct {
+	// Maximum number of results returned. Use a value of 0 to disable.
+	Limit int
 }
 
-// TODO(bwplotka): Move to promql/engine_test.go?
 // QueryableFunc is an adapter to allow the use of ordinary functions as
 // Queryables. It follows the idea of http.HandlerFunc.
-type QueryableFunc func(ctx context.Context, mint, maxt int64) (Querier, error)
+// TODO(bwplotka): Move to promql/engine_test.go?
+type QueryableFunc func(mint, maxt int64) (Querier, error)
 
 // Querier calls f() with the given parameters.
-func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
-	return f(ctx, mint, maxt)
+func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) {
+	return f(mint, maxt)
+}
+
+type AppendOptions struct {
+	DiscardOutOfOrder bool
 }
 
 // Appender provides batched appends against a storage.
 // It must be completed with a call to Commit or Rollback and must not be reused afterwards.
 //
 // Operations on the Appender interface are not goroutine-safe.
+//
+// The type of samples (float64, histogram, etc.) appended to a given series must remain the same within an Appender.
+// The behaviour is undefined if samples of different types are appended to the same series in a single Commit().
 type Appender interface {
-	// Add adds a sample pair for the given series. A reference number is
-	// returned which can be used to add further samples in the same or later
-	// transactions.
+	// Append adds a sample pair for the given series.
+	// An optional series reference can be provided to accelerate calls.
+	// A series reference number is returned which can be used to add further
+	// samples to the given series in the same or later transactions.
 	// Returned reference numbers are ephemeral and may be rejected in calls
-	// to AddFast() at any point. Adding the sample via Add() returns a new
+	// to Append() at any point. Adding the sample via Append() returns a new
 	// reference number.
 	// If the reference is 0 it must not be used for caching.
-	Add(l labels.Labels, t int64, v float64) (uint64, error)
-
-	// AddFast adds a sample pair for the referenced series. It is generally
-	// faster than adding a sample by providing its full label set.
-	AddFast(ref uint64, t int64, v float64) error
+	Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error)
 
 	// Commit submits the collected samples and purges the batch. If Commit
 	// returns a non-nil error, it also rolls back all modifications made in
@@ -155,19 +274,115 @@ type Appender interface {
 	// Rollback rolls back all modifications made in the appender so far.
 	// Appender has to be discarded after rollback.
 	Rollback() error
+
+	// SetOptions configures the appender with specific append options such as
+	// discarding out-of-order samples even if out-of-order is enabled in the TSDB.
+	SetOptions(opts *AppendOptions)
+
+	ExemplarAppender
+	HistogramAppender
+	MetadataUpdater
+	CreatedTimestampAppender
+}
+
+// GetRef is an extra interface on Appenders used by downstream projects
+// (e.g. Cortex) to avoid maintaining a parallel set of references.
+type GetRef interface {
+	// Returns reference number that can be used to pass to Appender.Append(),
+	// and a set of labels that will not cause another copy when passed to Appender.Append().
+	// 0 means the appender does not have a reference to this series.
+	// hash should be a hash of lset.
+	GetRef(lset labels.Labels, hash uint64) (SeriesRef, labels.Labels)
+}
+
+// ExemplarAppender provides an interface for adding samples to exemplar storage, which
+// within Prometheus is in-memory only.
+type ExemplarAppender interface {
+	// AppendExemplar adds an exemplar for the given series labels.
+	// An optional reference number can be provided to accelerate calls.
+	// A reference number is returned which can be used to add further
+	// exemplars in the same or later transactions.
+	// Returned reference numbers are ephemeral and may be rejected in calls
+	// to Append() at any point. Adding the sample via Append() returns a new
+	// reference number.
+	// If the reference is 0 it must not be used for caching.
+	// Note that in the current implementation of Prometheus' exemplar storage,
+	// calls to Append should generate the reference numbers; AppendExemplar
+	// generating a new reference number should be considered possibly erroneous behaviour and logged.
+	AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error)
+}
+
+// HistogramAppender provides an interface for appending histograms to the storage.
+type HistogramAppender interface {
+	// AppendHistogram adds a histogram for the given series labels. An
+	// optional reference number can be provided to accelerate calls. A
+	// reference number is returned which can be used to add further
+	// histograms in the same or later transactions. Returned reference
+	// numbers are ephemeral and may be rejected in calls to Append() at any
+	// point. Adding the sample via Append() returns a new reference number.
+	// If the reference is 0, it must not be used for caching.
+	//
+	// For efficiency reasons, the histogram is passed as a
+	// pointer. AppendHistogram won't mutate the histogram, but in turn
+	// depends on the caller to not mutate it either.
+	AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
+	// AppendHistogramCTZeroSample adds a synthetic zero sample for the given ct timestamp,
+	// which will be associated with the given series, labels and the incoming
+	// sample's t (timestamp). AppendHistogramCTZeroSample returns an error if the zero sample can't be
+	// appended, for example when ct is too old, or when it would collide with the
+	// incoming sample (the sample has priority).
+	//
+	// AppendHistogramCTZeroSample has to be called before the corresponding histogram AppendHistogram.
+	// A series reference number is returned which can be used to modify the
+	// CT for the given series in the same or later transactions.
+	// Returned reference numbers are ephemeral and may be rejected in calls
+	// to AppendHistogramCTZeroSample() at any point.
+	//
+	// If the reference is 0 it must not be used for caching.
+	AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
+}
+
+// MetadataUpdater provides an interface for associating metadata to stored series.
+type MetadataUpdater interface {
+	// UpdateMetadata updates a metadata entry for the given series and labels.
+	// A series reference number is returned which can be used to modify the
+	// metadata of the given series in the same or later transactions.
+	// Returned reference numbers are ephemeral and may be rejected in calls
+	// to UpdateMetadata() at any point. If the series does not exist,
+	// UpdateMetadata returns an error.
+	// If the reference is 0 it must not be used for caching.
+	UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error)
+}
+
+// CreatedTimestampAppender provides an interface for appending CT to storage.
+type CreatedTimestampAppender interface {
+	// AppendCTZeroSample adds a synthetic zero sample for the given ct timestamp,
+	// which will be associated with the given series, labels and the incoming
+	// sample's t (timestamp). AppendCTZeroSample returns an error if the zero sample can't be
+	// appended, for example when ct is too old, or when it would collide with the
+	// incoming sample (the sample has priority).
+	//
+	// AppendCTZeroSample has to be called before the corresponding sample Append.
+	// A series reference number is returned which can be used to modify the
+	// CT for the given series in the same or later transactions.
+	// Returned reference numbers are ephemeral and may be rejected in calls
+	// to AppendCTZeroSample() at any point.
+	//
+	// If the reference is 0 it must not be used for caching.
+	AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error)
 }
 
 // SeriesSet contains a set of series.
 type SeriesSet interface {
 	Next() bool
-	// At returns full series. Returned series should be iteratable even after Next is called.
+	// At returns full series. Returned series should be iterable even after Next is called.
 	At() Series
 	// The error that iteration has failed with.
 	// When an error occurs, set cannot continue to iterate.
 	Err() error
 	// A collection of warnings for the whole set.
 	// Warnings could be returned even if iteration has not failed with an error.
-	Warnings() Warnings
+	Warnings() annotations.Annotations
 }
 
 var emptySeriesSet = errSeriesSet{}
@@ -177,14 +392,28 @@ func EmptySeriesSet() SeriesSet {
 	return emptySeriesSet
 }
 
+type testSeriesSet struct {
+	series Series
+}
+
+func (s testSeriesSet) Next() bool                        { return true }
+func (s testSeriesSet) At() Series                        { return s.series }
+func (s testSeriesSet) Err() error                        { return nil }
+func (s testSeriesSet) Warnings() annotations.Annotations { return nil }
+
+// TestSeriesSet returns a mock series set.
+func TestSeriesSet(series Series) SeriesSet {
+	return testSeriesSet{series: series}
+}
+
 type errSeriesSet struct {
 	err error
 }
 
-func (s errSeriesSet) Next() bool         { return false }
-func (s errSeriesSet) At() Series         { return nil }
-func (s errSeriesSet) Err() error         { return s.err }
-func (s errSeriesSet) Warnings() Warnings { return nil }
+func (s errSeriesSet) Next() bool                        { return false }
+func (s errSeriesSet) At() Series                        { return nil }
+func (s errSeriesSet) Err() error                        { return s.err }
+func (s errSeriesSet) Warnings() annotations.Annotations { return nil }
 
 // ErrSeriesSet returns a series set that wraps an error.
 func ErrSeriesSet(err error) SeriesSet {
@@ -202,10 +431,10 @@ type errChunkSeriesSet struct {
 	err error
 }
 
-func (s errChunkSeriesSet) Next() bool         { return false }
-func (s errChunkSeriesSet) At() ChunkSeries    { return nil }
-func (s errChunkSeriesSet) Err() error         { return s.err }
-func (s errChunkSeriesSet) Warnings() Warnings { return nil }
+func (s errChunkSeriesSet) Next() bool                        { return false }
+func (s errChunkSeriesSet) At() ChunkSeries                   { return nil }
+func (s errChunkSeriesSet) Err() error                        { return s.err }
+func (s errChunkSeriesSet) Warnings() annotations.Annotations { return nil }
 
 // ErrChunkSeriesSet returns a chunk series set that wraps an error.
 func ErrChunkSeriesSet(err error) ChunkSeriesSet {
@@ -215,26 +444,49 @@ func ErrChunkSeriesSet(err error) ChunkSeriesSet {
 // Series exposes a single time series and allows iterating over samples.
 type Series interface {
 	Labels
-	SampleIteratable
+	SampleIterable
+}
+
+type mockSeries struct {
+	timestamps []int64
+	values     []float64
+	labelSet   []string
+}
+
+func (s mockSeries) Labels() labels.Labels {
+	return labels.FromStrings(s.labelSet...)
+}
+
+func (s mockSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
+	return chunkenc.MockSeriesIterator(s.timestamps, s.values)
+}
+
+// MockSeries returns a series with custom timestamps, values and labelSet.
+func MockSeries(timestamps []int64, values []float64, labelSet []string) Series {
+	return mockSeries{
+		timestamps: timestamps,
+		values:     values,
+		labelSet:   labelSet,
+	}
 }
 
 // ChunkSeriesSet contains a set of chunked series.
 type ChunkSeriesSet interface {
 	Next() bool
-	// At returns full chunk series. Returned series should be iteratable even after Next is called.
+	// At returns full chunk series. Returned series should be iterable even after Next is called.
 	At() ChunkSeries
 	// The error that iteration has failed with.
 	// When an error occurs, set cannot continue to iterate.
 	Err() error
 	// A collection of warnings for the whole set.
 	// Warnings could be returned even if iteration has not failed with an error.
-	Warnings() Warnings
+	Warnings() annotations.Annotations
 }
 
 // ChunkSeries exposes a single time series and allows iterating over chunks.
 type ChunkSeries interface {
 	Labels
-	ChunkIteratable
+	ChunkIterable
 }
 
 // Labels represents an item that has labels e.g. time series.
@@ -243,15 +495,16 @@ type Labels interface {
 	Labels() labels.Labels
 }
 
-type SampleIteratable interface {
-	// Iterator returns a new, independent iterator of the data of the series.
-	Iterator() chunkenc.Iterator
+type SampleIterable interface {
+	// Iterator returns an iterator of the data of the series.
+	// The iterator passed as argument is for re-use, if not nil.
+	// Depending on implementation, the iterator can
+	// be re-used or a new iterator can be allocated.
+	Iterator(chunkenc.Iterator) chunkenc.Iterator
 }
 
-type ChunkIteratable interface {
-	// Iterator returns a new, independent iterator that iterates over potentially overlapping
+type ChunkIterable interface {
+	// Iterator returns an iterator that iterates over potentially overlapping
 	// chunks of the series, sorted by min time.
-	Iterator() chunks.Iterator
+	Iterator(chunks.Iterator) chunks.Iterator
 }
-
-type Warnings []error
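
The MockQueryable, MockQuerier, TestSeriesSet and MockSeries helpers added above are enough to exercise the new context-aware Select without a real TSDB. A small sketch; the metric name is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

func main() {
	series := storage.MockSeries(
		[]int64{1000, 2000},
		[]float64{1, 2},
		[]string{"__name__", "demo_metric"},
	)

	// The mock ignores the matchers and always hands back the fixed series.
	q := &storage.MockQuerier{
		SelectMockFunction: func(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
			return storage.TestSeriesSet(series)
		},
	}
	queryable := &storage.MockQueryable{MockQuerier: q}

	querier, _ := queryable.Querier(0, 3000)
	set := querier.Select(context.Background(), false, nil)
	// TestSeriesSet's Next always reports true, so read a single element.
	if set.Next() {
		fmt.Println(set.At().Labels())
	}
}
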
diff --git a/vendor/github.com/prometheus/prometheus/storage/lazy.go b/vendor/github.com/prometheus/prometheus/storage/lazy.go
index 62f76cb6ac5480af16e774e6bd50cc934aa33957..fab974c2863ec9c398465b1d0a2c3427fa22cc3c 100644
--- a/vendor/github.com/prometheus/prometheus/storage/lazy.go
+++ b/vendor/github.com/prometheus/prometheus/storage/lazy.go
@@ -13,6 +13,10 @@
 
 package storage
 
+import (
+	"github.com/prometheus/prometheus/util/annotations"
+)
+
 // lazyGenericSeriesSet is a wrapped series set that is initialised on first call to Next().
 type lazyGenericSeriesSet struct {
 	init func() (genericSeriesSet, bool)
@@ -43,25 +47,25 @@ func (c *lazyGenericSeriesSet) At() Labels {
 	return nil
 }
 
-func (c *lazyGenericSeriesSet) Warnings() Warnings {
+func (c *lazyGenericSeriesSet) Warnings() annotations.Annotations {
 	if c.set != nil {
 		return c.set.Warnings()
 	}
 	return nil
 }
 
-type warningsOnlySeriesSet Warnings
+type warningsOnlySeriesSet annotations.Annotations
 
-func (warningsOnlySeriesSet) Next() bool           { return false }
-func (warningsOnlySeriesSet) Err() error           { return nil }
-func (warningsOnlySeriesSet) At() Labels           { return nil }
-func (c warningsOnlySeriesSet) Warnings() Warnings { return Warnings(c) }
+func (warningsOnlySeriesSet) Next() bool                          { return false }
+func (warningsOnlySeriesSet) Err() error                          { return nil }
+func (warningsOnlySeriesSet) At() Labels                          { return nil }
+func (c warningsOnlySeriesSet) Warnings() annotations.Annotations { return annotations.Annotations(c) }
 
 type errorOnlySeriesSet struct {
 	err error
 }
 
-func (errorOnlySeriesSet) Next() bool         { return false }
-func (errorOnlySeriesSet) At() Labels         { return nil }
-func (s errorOnlySeriesSet) Err() error       { return s.err }
-func (errorOnlySeriesSet) Warnings() Warnings { return nil }
+func (errorOnlySeriesSet) Next() bool                        { return false }
+func (errorOnlySeriesSet) At() Labels                        { return nil }
+func (s errorOnlySeriesSet) Err() error                      { return s.err }
+func (errorOnlySeriesSet) Warnings() annotations.Annotations { return nil }
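
lazyGenericSeriesSet defers creating the wrapped set until the first Next() call, so the cost of a Select scheduled through it is only paid once iteration actually starts. A standalone sketch of the same pattern with illustrative types (the vendored ones are unexported):

package main

import "fmt"

type seriesSet interface {
	Next() bool
	At() string
}

// lazySeriesSet materialises the inner set on first use. init returns the
// set together with whether it already holds a first element.
type lazySeriesSet struct {
	init func() (seriesSet, bool)
	set  seriesSet
}

func (c *lazySeriesSet) Next() bool {
	if c.set != nil {
		return c.set.Next()
	}
	var ok bool
	c.set, ok = c.init()
	return ok
}

func (c *lazySeriesSet) At() string {
	if c.set != nil {
		return c.set.At()
	}
	return ""
}

type sliceSet struct {
	items []string
	pos   int
}

func (s *sliceSet) Next() bool { s.pos++; return s.pos <= len(s.items) }
func (s *sliceSet) At() string { return s.items[s.pos-1] }

func main() {
	ls := &lazySeriesSet{init: func() (seriesSet, bool) {
		fmt.Println("expensive select runs now") // deferred until first Next
		s := &sliceSet{items: []string{"a", "b"}}
		return s, s.Next()
	}}
	for ls.Next() {
		fmt.Println(ls.At())
	}
}
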
diff --git a/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go b/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go
new file mode 100644
index 0000000000000000000000000000000000000000..273b3caa1db9a7711500558148ae9aca1fd30f4b
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go
@@ -0,0 +1,147 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"math"
+
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+)
+
+// MemoizedSeriesIterator wraps an iterator with a buffer to look back the previous element.
+//
+// This iterator regards integer histograms as float histograms; calls to Seek() will never return chunkenc.Histogram.
+// This iterator deliberately does not implement chunkenc.Iterator.
+type MemoizedSeriesIterator struct {
+	it    chunkenc.Iterator
+	delta int64
+
+	lastTime  int64
+	valueType chunkenc.ValueType
+
+	// Keep track of the previously returned value.
+	prevTime           int64
+	prevValue          float64
+	prevFloatHistogram *histogram.FloatHistogram
+}
+
+// NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator.
+func NewMemoizedEmptyIterator(delta int64) *MemoizedSeriesIterator {
+	return NewMemoizedIterator(chunkenc.NewNopIterator(), delta)
+}
+
+// NewMemoizedIterator returns a new iterator that buffers the values within the
+// time range of the current element and the duration of delta before.
+func NewMemoizedIterator(it chunkenc.Iterator, delta int64) *MemoizedSeriesIterator {
+	bit := &MemoizedSeriesIterator{
+		delta:    delta,
+		prevTime: math.MinInt64,
+	}
+	bit.Reset(it)
+
+	return bit
+}
+
+// Reset the internal state to reuse the wrapper with the provided iterator.
+func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) {
+	b.it = it
+	b.lastTime = math.MinInt64
+	b.prevTime = math.MinInt64
+	b.valueType = it.Next()
+}
+
+// PeekPrev returns the previous element of the iterator. If there is none buffered,
+// ok is false.
+func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, fh *histogram.FloatHistogram, ok bool) {
+	if b.prevTime == math.MinInt64 {
+		return 0, 0, nil, false
+	}
+	return b.prevTime, b.prevValue, b.prevFloatHistogram, true
+}
+
+// Seek advances the iterator to the element at time t or greater.
+func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType {
+	t0 := t - b.delta
+
+	if b.valueType != chunkenc.ValNone && t0 > b.lastTime {
+		// Reset the previously stored element because the seek advanced
+		// more than the delta.
+		b.prevTime = math.MinInt64
+
+		b.valueType = b.it.Seek(t0)
+		switch b.valueType {
+		case chunkenc.ValNone:
+			return chunkenc.ValNone
+		case chunkenc.ValHistogram:
+			b.valueType = chunkenc.ValFloatHistogram
+		}
+		b.lastTime = b.it.AtT()
+	}
+	if b.lastTime >= t {
+		return b.valueType
+	}
+	for b.Next() != chunkenc.ValNone {
+		if b.lastTime >= t {
+			return b.valueType
+		}
+	}
+
+	return chunkenc.ValNone
+}
+
+// Next advances the iterator to the next element. Note that this does not check whether the element being buffered is
+// within the time range of the current element and the duration of delta before.
+func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
+	// Keep track of the previous element.
+	switch b.valueType {
+	case chunkenc.ValNone:
+		return chunkenc.ValNone
+	case chunkenc.ValFloat:
+		b.prevTime, b.prevValue = b.it.At()
+		b.prevFloatHistogram = nil
+	case chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
+		b.prevValue = 0
+		b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram(nil)
+	}
+
+	b.valueType = b.it.Next()
+	if b.valueType != chunkenc.ValNone {
+		b.lastTime = b.it.AtT()
+	}
+	if b.valueType == chunkenc.ValHistogram {
+		b.valueType = chunkenc.ValFloatHistogram
+	}
+	return b.valueType
+}
+
+// At returns the current float element of the iterator.
+func (b *MemoizedSeriesIterator) At() (int64, float64) {
+	return b.it.At()
+}
+
+// AtFloatHistogram returns the current float-histogram element of the iterator.
+func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return b.it.AtFloatHistogram(nil)
+}
+
+// AtT returns the timestamp of the current element of the iterator.
+func (b *MemoizedSeriesIterator) AtT() int64 {
+	return b.it.AtT()
+}
+
+// Err returns the last encountered error.
+func (b *MemoizedSeriesIterator) Err() error {
+	return b.it.Err()
+}
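
MemoizedSeriesIterator keeps the element that precedes the current one so callers needing one sample of look-back (as the PromQL engine does) can read it through PeekPrev without re-seeking. A usage sketch relying on chunkenc.MockSeriesIterator, which the MockSeries code above also uses; note the wrapper is already positioned on the first sample after construction, so the first Next() moves to the second sample and memoizes the first:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	it := chunkenc.MockSeriesIterator(
		[]int64{1000, 2000, 3000},
		[]float64{1, 2, 3},
	)
	mit := storage.NewMemoizedIterator(it, 1500)

	for mit.Next() != chunkenc.ValNone {
		t, v := mit.At()
		if pt, pv, _, ok := mit.PeekPrev(); ok {
			fmt.Printf("t=%d v=%g (prev t=%d v=%g)\n", t, v, pt, pv)
		} else {
			fmt.Printf("t=%d v=%g (no prev yet)\n", t, v)
		}
	}
}
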
diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go
index 1670a119a13962d9435a2aec2f6b6c5a8b7cd9c1..bc70ceea55e5f46ff7d64392e11f256438c3ecda 100644
--- a/vendor/github.com/prometheus/prometheus/storage/merge.go
+++ b/vendor/github.com/prometheus/prometheus/storage/merge.go
@@ -16,17 +16,17 @@ package storage
 import (
 	"bytes"
 	"container/heap"
+	"context"
+	"fmt"
 	"math"
-	"sort"
-	"strings"
 	"sync"
 
-	"github.com/pkg/errors"
-
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/util/annotations"
 )
 
 type mergeGenericQuerier struct {
@@ -43,17 +43,25 @@ type mergeGenericQuerier struct {
 // See NewFanout commentary to learn more about primary vs secondary differences.
 //
 // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
-func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
+func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
+	primaries = filterQueriers(primaries)
+	secondaries = filterQueriers(secondaries)
+
+	switch {
+	case len(primaries) == 0 && len(secondaries) == 0:
+		return noopQuerier{}
+	case len(primaries) == 1 && len(secondaries) == 0:
+		return primaries[0]
+	case len(primaries) == 0 && len(secondaries) == 1:
+		return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])}
+	}
+
 	queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
 	for _, q := range primaries {
-		if _, ok := q.(noopQuerier); !ok && q != nil {
-			queriers = append(queriers, newGenericQuerierFrom(q))
-		}
+		queriers = append(queriers, newGenericQuerierFrom(q))
 	}
 	for _, q := range secondaries {
-		if _, ok := q.(noopQuerier); !ok && q != nil {
-			queriers = append(queriers, newSecondaryQuerierFrom(q))
-		}
+		queriers = append(queriers, newSecondaryQuerierFrom(q))
 	}
 
 	concurrentSelect := false
@@ -67,22 +75,40 @@ func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn Vertica
 	}}
 }
 
+func filterQueriers(qs []Querier) []Querier {
+	ret := make([]Querier, 0, len(qs))
+	for _, q := range qs {
+		if _, ok := q.(noopQuerier); !ok && q != nil {
+			ret = append(ret, q)
+		}
+	}
+	return ret
+}
+
 // NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers.
 // See NewFanout commentary to learn more about primary vs secondary differences.
 //
 // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
 // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
-func NewMergeChunkQuerier(primaries []ChunkQuerier, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
+func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
+	primaries = filterChunkQueriers(primaries)
+	secondaries = filterChunkQueriers(secondaries)
+
+	switch {
+	case len(primaries) == 0 && len(secondaries) == 0:
+		return noopChunkQuerier{}
+	case len(primaries) == 1 && len(secondaries) == 0:
+		return primaries[0]
+	case len(primaries) == 0 && len(secondaries) == 1:
+		return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])}
+	}
+
 	queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
 	for _, q := range primaries {
-		if _, ok := q.(noopChunkQuerier); !ok && q != nil {
-			queriers = append(queriers, newGenericQuerierFromChunk(q))
-		}
+		queriers = append(queriers, newGenericQuerierFromChunk(q))
 	}
-	for _, querier := range secondaries {
-		if _, ok := querier.(noopChunkQuerier); !ok && querier != nil {
-			queriers = append(queriers, newSecondaryQuerierFromChunk(querier))
-		}
+	for _, q := range secondaries {
+		queriers = append(queriers, newSecondaryQuerierFromChunk(q))
 	}
 
 	concurrentSelect := false
@@ -96,23 +122,30 @@ func NewMergeChunkQuerier(primaries []ChunkQuerier, secondaries []ChunkQuerier,
 	}}
 }
 
-// Select returns a set of series that matches the given label matchers.
-func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
-	if len(q.queriers) == 0 {
-		return noopGenericSeriesSet{}
-	}
-	if len(q.queriers) == 1 {
-		return q.queriers[0].Select(sortSeries, hints, matchers...)
+func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier {
+	ret := make([]ChunkQuerier, 0, len(qs))
+	for _, q := range qs {
+		if _, ok := q.(noopChunkQuerier); !ok && q != nil {
+			ret = append(ret, q)
+		}
 	}
+	return ret
+}
 
-	var seriesSets = make([]genericSeriesSet, 0, len(q.queriers))
+// Select returns a set of series that matches the given label matchers.
+func (q *mergeGenericQuerier) Select(ctx context.Context, _ bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
+	seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
+	var limit int
+	if hints != nil {
+		limit = hints.Limit
+	}
 	if !q.concurrentSelect {
 		for _, querier := range q.queriers {
 			// We need to sort for merge to work.
-			seriesSets = append(seriesSets, querier.Select(true, hints, matchers...))
+			seriesSets = append(seriesSets, querier.Select(ctx, true, hints, matchers...))
 		}
 		return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) {
-			s := newGenericMergeSeriesSet(seriesSets, q.mergeFn)
+			s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn)
 			return s, s.Next()
 		}}
 	}
@@ -123,13 +156,18 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche
 	)
 	// Schedule all Selects for all queriers we know about.
 	for _, querier := range q.queriers {
+		// Copy the matchers, as some queriers may alter the slice.
+		// See https://github.com/prometheus/prometheus/issues/14723
+		matchersCopy := make([]*labels.Matcher, len(matchers))
+		copy(matchersCopy, matchers)
+
 		wg.Add(1)
-		go func(qr genericQuerier) {
+		go func(qr genericQuerier, m []*labels.Matcher) {
 			defer wg.Done()
 
 			// We need to sort for NewMergeSeriesSet to work.
-			seriesSetChan <- qr.Select(true, hints, matchers...)
-		}(querier)
+			seriesSetChan <- qr.Select(ctx, true, hints, m...)
+		}(querier, matchersCopy)
 	}
 	go func() {
 		wg.Wait()
@@ -140,7 +178,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche
 		seriesSets = append(seriesSets, r)
 	}
 	return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) {
-		s := newGenericMergeSeriesSet(seriesSets, q.mergeFn)
+		s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn)
 		return s, s.Next()
 	}}
 }
@@ -155,36 +193,47 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
 }
 
 // LabelValues returns all potential values for a label name.
-func (q *mergeGenericQuerier) LabelValues(name string) ([]string, Warnings, error) {
-	res, ws, err := q.lvals(q.queriers, name)
+// If matchers are specified the returned result set is reduced
+// to label values of metrics matching the matchers.
+func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) {
+		return q.LabelValues(ctx, name, hints, matchers...)
+	})
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "LabelValues() from merge generic querier for label %s", name)
+		return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
 	}
 	return res, ws, nil
 }
 
-// lvals performs merge sort for LabelValues from multiple queriers.
-func (q *mergeGenericQuerier) lvals(lq labelGenericQueriers, n string) ([]string, Warnings, error) {
+// mergeResults performs merge sort on the results of invoking the resultsFn against multiple queriers.
+func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *LabelHints, resultsFn func(q LabelQuerier) ([]string, annotations.Annotations, error)) ([]string, annotations.Annotations, error) {
 	if lq.Len() == 0 {
 		return nil, nil, nil
 	}
 	if lq.Len() == 1 {
-		return lq.Get(0).LabelValues(n)
+		return resultsFn(lq.Get(0))
 	}
 	a, b := lq.SplitByHalf()
 
-	var ws Warnings
-	s1, w, err := q.lvals(a, n)
-	ws = append(ws, w...)
+	var ws annotations.Annotations
+	s1, w, err := q.mergeResults(a, hints, resultsFn)
+	ws.Merge(w)
 	if err != nil {
 		return nil, ws, err
 	}
-	s2, ws, err := q.lvals(b, n)
-	ws = append(ws, w...)
+	s2, w, err := q.mergeResults(b, hints, resultsFn)
+	ws.Merge(w)
 	if err != nil {
 		return nil, ws, err
 	}
-	return mergeStrings(s1, s2), ws, nil
+
+	s1 = truncateToLimit(s1, hints)
+	s2 = truncateToLimit(s2, hints)
+
+	merged := mergeStrings(s1, s2)
+	merged = truncateToLimit(merged, hints)
+
+	return merged, ws, nil
 }
 
 func mergeStrings(a, b []string) []string {
@@ -195,15 +244,14 @@ func mergeStrings(a, b []string) []string {
 	res := make([]string, 0, maxl*10/9)
 
 	for len(a) > 0 && len(b) > 0 {
-		d := strings.Compare(a[0], b[0])
-
-		if d == 0 {
+		switch {
+		case a[0] == b[0]:
 			res = append(res, a[0])
 			a, b = a[1:], b[1:]
-		} else if d < 0 {
+		case a[0] < b[0]:
 			res = append(res, a[0])
 			a = a[1:]
-		} else if d > 0 {
+		default:
 			res = append(res, b[0])
 			b = b[1:]
 		}
@@ -216,39 +264,19 @@ func mergeStrings(a, b []string) []string {
 }
 
 // LabelNames returns all the unique label names present in all queriers in sorted order.
-func (q *mergeGenericQuerier) LabelNames() ([]string, Warnings, error) {
-	var (
-		labelNamesMap = make(map[string]struct{})
-		warnings      Warnings
-	)
-	for _, querier := range q.queriers {
-		names, wrn, err := querier.LabelNames()
-		if wrn != nil {
-			// TODO(bwplotka): We could potentially wrap warnings.
-			warnings = append(warnings, wrn...)
-		}
-		if err != nil {
-			return nil, nil, errors.Wrap(err, "LabelNames() from merge generic querier")
-		}
-		for _, name := range names {
-			labelNamesMap[name] = struct{}{}
-		}
-	}
-	if len(labelNamesMap) == 0 {
-		return nil, warnings, nil
-	}
-
-	labelNames := make([]string, 0, len(labelNamesMap))
-	for name := range labelNamesMap {
-		labelNames = append(labelNames, name)
+func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) {
+		return q.LabelNames(ctx, hints, matchers...)
+	})
+	if err != nil {
+		return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err)
 	}
-	sort.Strings(labelNames)
-	return labelNames, warnings, nil
+	return res, ws, nil
 }
 
 // Close releases the resources of the generic querier.
 func (q *mergeGenericQuerier) Close() error {
-	errs := tsdb_errors.MultiError{}
+	errs := tsdb_errors.NewMulti()
 	for _, querier := range q.queriers {
 		if err := querier.Close(); err != nil {
 			errs.Add(err)
@@ -257,18 +285,25 @@ func (q *mergeGenericQuerier) Close() error {
 	return errs.Err()
 }
 
+func truncateToLimit(s []string, hints *LabelHints) []string {
+	if hints != nil && hints.Limit > 0 && len(s) > hints.Limit {
+		s = s[:hints.Limit]
+	}
+	return s
+}
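+
+// For example, truncateToLimit([]string{"a", "b", "c"}, &LabelHints{Limit: 2})
+// returns ["a", "b"], while a nil hints or a Limit <= 0 leaves s unchanged.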
+
 // VerticalSeriesMergeFunc returns merged series implementation that merges series with same labels together.
 // It has to handle time-overlapped series as well.
 type VerticalSeriesMergeFunc func(...Series) Series
 
 // NewMergeSeriesSet returns a new SeriesSet that merges many SeriesSets together.
-func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) SeriesSet {
+// If limit is set, the SeriesSet will be limited to at most limit series. 0 means disabled.
+func NewMergeSeriesSet(sets []SeriesSet, limit int, mergeFunc VerticalSeriesMergeFunc) SeriesSet {
 	genericSets := make([]genericSeriesSet, 0, len(sets))
 	for _, s := range sets {
 		genericSets = append(genericSets, &genericSeriesSetAdapter{s})
-
 	}
-	return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)}
+	return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)}
 }
 
 // VerticalChunkSeriesMergeFunc returns merged chunk series implementation that merges potentially time-overlapping
@@ -278,13 +313,12 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri
 type VerticalChunkSeriesMergeFunc func(...ChunkSeries) ChunkSeries
 
 // NewMergeChunkSeriesSet returns a new ChunkSeriesSet that merges many SeriesSet together.
-func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet {
+func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, limit int, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet {
 	genericSets := make([]genericSeriesSet, 0, len(sets))
 	for _, s := range sets {
 		genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s})
-
 	}
-	return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)}
+	return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)}
 }
 
 // genericMergeSeriesSet implements genericSeriesSet.
@@ -292,9 +326,11 @@ type genericMergeSeriesSet struct {
 	currentLabels labels.Labels
 	mergeFunc     genericSeriesMergeFunc
 
-	heap        genericSeriesSetHeap
-	sets        []genericSeriesSet
-	currentSets []genericSeriesSet
+	heap         genericSeriesSetHeap
+	sets         []genericSeriesSet
+	currentSets  []genericSeriesSet
+	seriesLimit  int
+	mergedSeries int // tracks the total number of series merged and returned.
 }
 
 // newGenericMergeSeriesSet returns a new genericSeriesSet that merges (and deduplicates)
@@ -302,7 +338,8 @@ type genericMergeSeriesSet struct {
 // Each series set must return its series in label order; otherwise the
 // merged series set will be incorrect.
 // Overlapped situations are merged using provided mergeFunc.
-func newGenericMergeSeriesSet(sets []genericSeriesSet, mergeFunc genericSeriesMergeFunc) genericSeriesSet {
+// If seriesLimit is set, at most seriesLimit series are returned.
+func newGenericMergeSeriesSet(sets []genericSeriesSet, seriesLimit int, mergeFunc genericSeriesMergeFunc) genericSeriesSet {
 	if len(sets) == 1 {
 		return sets[0]
 	}
@@ -322,13 +359,19 @@ func newGenericMergeSeriesSet(sets []genericSeriesSet, mergeFunc genericSeriesMe
 		}
 	}
 	return &genericMergeSeriesSet{
-		mergeFunc: mergeFunc,
-		sets:      sets,
-		heap:      h,
+		mergeFunc:   mergeFunc,
+		sets:        sets,
+		heap:        h,
+		seriesLimit: seriesLimit,
 	}
 }
 
 func (c *genericMergeSeriesSet) Next() bool {
+	if c.seriesLimit > 0 && c.mergedSeries >= c.seriesLimit {
+		// Exit early once the series limit has been reached.
+		return false
+	}
+
 	// Run in a loop because the "next" series sets may not be valid anymore.
 	// If, for the current label set, all the next series sets come from
 	// failed remote storage sources, we want to keep trying with the next label set.
@@ -346,7 +389,7 @@ func (c *genericMergeSeriesSet) Next() bool {
 		}
 
 		// Now, pop items of the heap that have equal label sets.
-		c.currentSets = nil
+		c.currentSets = c.currentSets[:0]
 		c.currentLabels = c.heap[0].At().Labels()
 		for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) {
 			set := heap.Pop(&c.heap).(genericSeriesSet)
@@ -359,6 +402,7 @@ func (c *genericMergeSeriesSet) Next() bool {
 			break
 		}
 	}
+	c.mergedSeries++
 	return true
 }
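+
+// An illustrative walk-through (not part of the upstream change): with
+//
+//	set A: {job="x"}, {job="z"}
+//	set B: {job="x"}, {job="y"}
+//
+// the first Next pops both {job="x"} entries off the heap so At can hand them
+// to the merge function together, the second yields {job="y"}, and the third
+// would yield {job="z"}; with seriesLimit == 2, however, that third call
+// returns false instead.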
 
@@ -382,10 +426,10 @@ func (c *genericMergeSeriesSet) Err() error {
 	return nil
 }
 
-func (c *genericMergeSeriesSet) Warnings() Warnings {
-	var ws Warnings
+func (c *genericMergeSeriesSet) Warnings() annotations.Annotations {
+	var ws annotations.Annotations
 	for _, set := range c.sets {
-		ws = append(ws, set.Warnings()...)
+		ws.Merge(set.Warnings())
 	}
 	return ws
 }
@@ -427,12 +471,8 @@ func ChainedSeriesMerge(series ...Series) Series {
 	}
 	return &SeriesEntry{
 		Lset: series[0].Labels(),
-		SampleIteratorFn: func() chunkenc.Iterator {
-			iterators := make([]chunkenc.Iterator, 0, len(series))
-			for _, s := range series {
-				iterators = append(iterators, s.Iterator())
-			}
-			return newChainSampleIterator(iterators)
+		SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
+			return ChainSampleIteratorFromSeries(it, series)
 		},
 	}
 }
@@ -445,61 +485,173 @@ type chainSampleIterator struct {
 	h         samplesIteratorHeap
 
 	curr  chunkenc.Iterator
-	lastt int64
+	lastT int64
+
+	// Whether the previous and the current sample are direct neighbors
+	// within the same base iterator.
+	consecutive bool
+}
+
+// getChainSampleIterator returns a chainSampleIterator initialized for length entries, reusing the memory from it if possible.
+func getChainSampleIterator(it chunkenc.Iterator, length int) *chainSampleIterator {
+	csi, ok := it.(*chainSampleIterator)
+	if !ok {
+		csi = &chainSampleIterator{}
+	}
+	if cap(csi.iterators) < length {
+		csi.iterators = make([]chunkenc.Iterator, length)
+	} else {
+		csi.iterators = csi.iterators[:length]
+	}
+	csi.h = nil
+	csi.lastT = math.MinInt64
+	return csi
 }
 
-func newChainSampleIterator(iterators []chunkenc.Iterator) chunkenc.Iterator {
-	return &chainSampleIterator{
-		iterators: iterators,
-		h:         nil,
-		lastt:     math.MinInt64,
+func ChainSampleIteratorFromSeries(it chunkenc.Iterator, series []Series) chunkenc.Iterator {
+	csi := getChainSampleIterator(it, len(series))
+	for i, s := range series {
+		csi.iterators[i] = s.Iterator(csi.iterators[i])
 	}
+	return csi
 }
 
-func (c *chainSampleIterator) Seek(t int64) bool {
+func ChainSampleIteratorFromIterables(it chunkenc.Iterator, iterables []chunkenc.Iterable) chunkenc.Iterator {
+	csi := getChainSampleIterator(it, len(iterables))
+	for i, c := range iterables {
+		csi.iterators[i] = c.Iterator(csi.iterators[i])
+	}
+	return csi
+}
+
+func ChainSampleIteratorFromIterators(it chunkenc.Iterator, iterators []chunkenc.Iterator) chunkenc.Iterator {
+	csi := getChainSampleIterator(it, 0)
+	csi.iterators = iterators
+	return csi
+}
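+
+// A reuse sketch (not part of the upstream change; groups is hypothetical):
+//
+//	var it chunkenc.Iterator
+//	for _, group := range groups { // Each group is a []Series with equal labels.
+//		it = ChainSampleIteratorFromSeries(it, group)
+//		for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
+//			// Consume the merged samples here.
+//		}
+//	}
+//
+// Passing the previous iterator back in lets getChainSampleIterator recycle
+// the underlying iterators slice instead of allocating a new one.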
+
+func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
+	// No-op check.
+	if c.curr != nil && c.lastT >= t {
+		return c.curr.Seek(c.lastT)
+	}
+	// Don't bother to find out if the next sample is consecutive. Callers
+	// of Seek usually aren't interested anyway.
+	c.consecutive = false
 	c.h = samplesIteratorHeap{}
 	for _, iter := range c.iterators {
-		if iter.Seek(t) {
-			heap.Push(&c.h, iter)
+		if iter.Seek(t) == chunkenc.ValNone {
+			if iter.Err() != nil {
+				// If any iterator is reporting an error, abort.
+				return chunkenc.ValNone
+			}
+			continue
 		}
+		heap.Push(&c.h, iter)
 	}
 	if len(c.h) > 0 {
 		c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
-		return true
+		c.lastT = c.curr.AtT()
+		return c.curr.Seek(c.lastT)
 	}
 	c.curr = nil
-	return false
+	return chunkenc.ValNone
 }
 
 func (c *chainSampleIterator) At() (t int64, v float64) {
 	if c.curr == nil {
-		panic("chainSampleIterator.At() called before first .Next() or after .Next() returned false.")
+		panic("chainSampleIterator.At called before first .Next or after .Next returned false.")
 	}
 	return c.curr.At()
 }
 
-func (c *chainSampleIterator) Next() bool {
+func (c *chainSampleIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
+	if c.curr == nil {
+		panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.")
+	}
+	t, h := c.curr.AtHistogram(h)
+	// If the current sample is not consecutive with the previous one, we
+	// cannot be sure anymore about counter resets for counter histograms.
+	// TODO(beorn7): If a `NotCounterReset` sample is followed by a
+	// non-consecutive `CounterReset` sample, we could keep the hint as
+	// `CounterReset`. But then we would need to track the previous sample
+	// in more detail, which might not be worth it.
+	if !c.consecutive && h.CounterResetHint != histogram.GaugeType {
+		h.CounterResetHint = histogram.UnknownCounterReset
+	}
+	return t, h
+}
+
+func (c *chainSampleIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	if c.curr == nil {
+		panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.")
+	}
+	t, fh := c.curr.AtFloatHistogram(fh)
+	// If the current sample is not consecutive with the previous one, we
+	// cannot be sure anymore about counter resets for counter histograms.
+	// TODO(beorn7): If a `NotCounterReset` sample is followed by a
+	// non-consecutive `CounterReset` sample, we could keep the hint as
+	// `CounterReset`. But then we would need to track the previous sample
+	// in more detail, which might not be worth it.
+	if !c.consecutive && fh.CounterResetHint != histogram.GaugeType {
+		fh.CounterResetHint = histogram.UnknownCounterReset
+	}
+	return t, fh
+}
+
+func (c *chainSampleIterator) AtT() int64 {
+	if c.curr == nil {
+		panic("chainSampleIterator.AtT called before first .Next or after .Next returned false.")
+	}
+	return c.curr.AtT()
+}
+
+func (c *chainSampleIterator) Next() chunkenc.ValueType {
+	var (
+		currT           int64
+		currValueType   chunkenc.ValueType
+		iteratorChanged bool
+	)
 	if c.h == nil {
+		iteratorChanged = true
 		c.h = samplesIteratorHeap{}
 		// We call c.curr.Next() as the first thing below.
 		// So, we don't call Next() on it here.
 		c.curr = c.iterators[0]
 		for _, iter := range c.iterators[1:] {
-			if iter.Next() {
+			if iter.Next() == chunkenc.ValNone {
+				if iter.Err() != nil {
+					// If any iterator is reporting an error, abort.
+					// If c.iterators[0] is reporting an error, we'll handle that below.
+					return chunkenc.ValNone
+				}
+			} else {
 				heap.Push(&c.h, iter)
 			}
 		}
 	}
 
 	if c.curr == nil {
-		return false
+		return chunkenc.ValNone
 	}
 
-	var currt int64
 	for {
-		if c.curr.Next() {
-			currt, _ = c.curr.At()
-			if currt == c.lastt {
+		currValueType = c.curr.Next()
+
+		if currValueType == chunkenc.ValNone {
+			if c.curr.Err() != nil {
+				// Abort if we've hit an error.
+				return chunkenc.ValNone
+			}
+
+			if len(c.h) == 0 {
+				// No iterator left to iterate.
+				c.curr = nil
+				return chunkenc.ValNone
+			}
+		} else {
+			currT = c.curr.AtT()
+			if currT == c.lastT {
 				// Ignoring sample for the same timestamp.
 				continue
 			}
@@ -510,35 +662,33 @@ func (c *chainSampleIterator) Next() bool {
 			}
 
 			// Check current iterator with the top of the heap.
-			if nextt, _ := c.h[0].At(); currt < nextt {
+			nextT := c.h[0].AtT()
+			if currT < nextT {
 				// Current iterator has smaller timestamp than the heap.
 				break
 			}
 			// Current iterator does not hold the smallest timestamp.
 			heap.Push(&c.h, c.curr)
-		} else if len(c.h) == 0 {
-			// No iterator left to iterate.
-			c.curr = nil
-			return false
 		}
 
 		c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
-		currt, _ = c.curr.At()
-		if currt != c.lastt {
+		iteratorChanged = true
+		currT = c.curr.AtT()
+		currValueType = c.curr.Seek(currT)
+		if currT != c.lastT {
 			break
 		}
 	}
 
-	c.lastt = currt
-	return true
+	c.consecutive = !iteratorChanged
+	c.lastT = currT
+	return currValueType
 }
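+
+// An illustrative trace (not part of the upstream change): chaining iterators
+// over samples at t=1,3 and t=1,2 yields exactly one sample each at t=1, t=2
+// and t=3. Whichever t=1 sample loses the heap tie is skipped via the lastT
+// check, and consecutive stays true only while successive samples come from
+// the same underlying iterator.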
 
 func (c *chainSampleIterator) Err() error {
-	var errs tsdb_errors.MultiError
+	errs := tsdb_errors.NewMulti()
 	for _, iter := range c.iterators {
-		if err := iter.Err(); err != nil {
-			errs.Add(err)
-		}
+		errs.Add(iter.Err())
 	}
 	return errs.Err()
 }
@@ -549,9 +699,7 @@ func (h samplesIteratorHeap) Len() int      { return len(h) }
 func (h samplesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
 
 func (h samplesIteratorHeap) Less(i, j int) bool {
-	at, _ := h[i].At()
-	bt, _ := h[j].At()
-	return at < bt
+	return h[i].AtT() < h[j].AtT()
 }
 
 func (h *samplesIteratorHeap) Push(x interface{}) {
@@ -580,10 +728,10 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC
 		}
 		return &ChunkSeriesEntry{
 			Lset: series[0].Labels(),
-			ChunkIteratorFn: func() chunks.Iterator {
+			ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
 				iterators := make([]chunks.Iterator, 0, len(series))
 				for _, s := range series {
-					iterators = append(iterators, s.Iterator())
+					iterators = append(iterators, s.Iterator(nil))
 				}
 				return &compactChunkIterator{
 					mergeFunc: mergeFunc,
@@ -643,13 +791,12 @@ func (c *compactChunkIterator) Next() bool {
 			break
 		}
 
-		if next.MinTime == prev.MinTime &&
-			next.MaxTime == prev.MaxTime &&
-			bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
-			// 1:1 duplicates, skip it.
-		} else {
-			// We operate on same series, so labels does not matter here.
-			overlapping = append(overlapping, newChunkToSeriesDecoder(nil, next))
+		// Only do something if it is not a perfect duplicate.
+		if next.MinTime != prev.MinTime ||
+			next.MaxTime != prev.MaxTime ||
+			!bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
+			// We operate on the same series, so labels do not matter here.
+			overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next))
 			if next.MaxTime > oMaxTime {
 				oMaxTime = next.MaxTime
 			}
@@ -666,7 +813,7 @@ func (c *compactChunkIterator) Next() bool {
 	}
 
 	// Add the last one as it's not yet included in the overlap. We operate on the same series, so labels do not matter here.
-	iter = (&seriesToChunkEncoder{Series: c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(nil, c.curr))...)}).Iterator()
+	iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), c.curr))...)).Iterator(nil)
 	if !iter.Next() {
 		if c.err = iter.Err(); c.err != nil {
 			return false
@@ -681,11 +828,9 @@ func (c *compactChunkIterator) Next() bool {
 }
 
 func (c *compactChunkIterator) Err() error {
-	var errs tsdb_errors.MultiError
+	errs := tsdb_errors.NewMulti()
 	for _, iter := range c.iterators {
-		if err := iter.Err(); err != nil {
-			errs.Add(err)
-		}
+		errs.Add(iter.Err())
 	}
 	errs.Add(c.err)
 	return errs.Err()
@@ -716,3 +861,59 @@ func (h *chunkIteratorHeap) Pop() interface{} {
 	*h = old[0 : n-1]
 	return x
 }
+
+// NewConcatenatingChunkSeriesMerger returns a VerticalChunkSeriesMergeFunc that simply concatenates the
+// chunks from the series. The resultant stream of chunks for a series might be overlapping and unsorted.
+func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc {
+	return func(series ...ChunkSeries) ChunkSeries {
+		if len(series) == 0 {
+			return nil
+		}
+		return &ChunkSeriesEntry{
+			Lset: series[0].Labels(),
+			ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
+				iterators := make([]chunks.Iterator, 0, len(series))
+				for _, s := range series {
+					iterators = append(iterators, s.Iterator(nil))
+				}
+				return &concatenatingChunkIterator{
+					iterators: iterators,
+				}
+			},
+		}
+	}
+}
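+
+// A usage sketch (not part of the upstream change; seriesA and seriesB are
+// hypothetical ChunkSeries with identical labels):
+//
+//	merge := NewConcatenatingChunkSeriesMerger()
+//	it := merge(seriesA, seriesB).Iterator(nil)
+//	for it.Next() {
+//		_ = it.At() // All chunks of seriesA first, then those of seriesB.
+//	}
+//
+// Unlike NewCompactingChunkSeriesMerger, this performs no decoding or
+// re-encoding, at the cost of possibly overlapping, unsorted chunks.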
+
+type concatenatingChunkIterator struct {
+	iterators []chunks.Iterator
+	idx       int
+
+	curr chunks.Meta
+}
+
+func (c *concatenatingChunkIterator) At() chunks.Meta {
+	return c.curr
+}
+
+func (c *concatenatingChunkIterator) Next() bool {
+	if c.idx >= len(c.iterators) {
+		return false
+	}
+	if c.iterators[c.idx].Next() {
+		c.curr = c.iterators[c.idx].At()
+		return true
+	}
+	if c.iterators[c.idx].Err() != nil {
+		return false
+	}
+	c.idx++
+	return c.Next()
+}
+
+func (c *concatenatingChunkIterator) Err() error {
+	errs := tsdb_errors.NewMulti()
+	for _, iter := range c.iterators {
+		errs.Add(iter.Err())
+	}
+	return errs.Err()
+}
diff --git a/vendor/github.com/prometheus/prometheus/storage/noop.go b/vendor/github.com/prometheus/prometheus/storage/noop.go
index 00599aba748a340ecade105b2ce7ba836c69ce5e..f5092da7c76ec8504931a9a038e86598e7b3455e 100644
--- a/vendor/github.com/prometheus/prometheus/storage/noop.go
+++ b/vendor/github.com/prometheus/prometheus/storage/noop.go
@@ -14,7 +14,10 @@
 package storage
 
 import (
-	"github.com/prometheus/prometheus/pkg/labels"
+	"context"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/util/annotations"
 )
 
 type noopQuerier struct{}
@@ -24,15 +27,15 @@ func NoopQuerier() Querier {
 	return noopQuerier{}
 }
 
-func (noopQuerier) Select(bool, *SelectHints, ...*labels.Matcher) SeriesSet {
+func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) SeriesSet {
 	return NoopSeriesSet()
 }
 
-func (noopQuerier) LabelValues(string) ([]string, Warnings, error) {
+func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, nil
 }
 
-func (noopQuerier) LabelNames() ([]string, Warnings, error) {
+func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, nil
 }
 
@@ -47,15 +50,15 @@ func NoopChunkedQuerier() ChunkQuerier {
 	return noopChunkQuerier{}
 }
 
-func (noopChunkQuerier) Select(bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet {
+func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet {
 	return NoopChunkedSeriesSet()
 }
 
-func (noopChunkQuerier) LabelValues(string) ([]string, Warnings, error) {
+func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, nil
 }
 
-func (noopChunkQuerier) LabelNames() ([]string, Warnings, error) {
+func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, nil
 }
 
@@ -76,7 +79,7 @@ func (noopSeriesSet) At() Series { return nil }
 
 func (noopSeriesSet) Err() error { return nil }
 
-func (noopSeriesSet) Warnings() Warnings { return nil }
+func (noopSeriesSet) Warnings() annotations.Annotations { return nil }
 
 type noopChunkedSeriesSet struct{}
 
@@ -91,4 +94,4 @@ func (noopChunkedSeriesSet) At() ChunkSeries { return nil }
 
 func (noopChunkedSeriesSet) Err() error { return nil }
 
-func (noopChunkedSeriesSet) Warnings() Warnings { return nil }
+func (noopChunkedSeriesSet) Warnings() annotations.Annotations { return nil }
diff --git a/vendor/github.com/prometheus/prometheus/storage/secondary.go b/vendor/github.com/prometheus/prometheus/storage/secondary.go
index 9e768b34987191a722e2749198a691df8410d681..1cf8024b65e42094209dd17fb676de57e8d2816e 100644
--- a/vendor/github.com/prometheus/prometheus/storage/secondary.go
+++ b/vendor/github.com/prometheus/prometheus/storage/secondary.go
@@ -14,9 +14,11 @@
 package storage
 
 import (
+	"context"
 	"sync"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/util/annotations"
 )
 
 // secondaryQuerier is a wrapper that allows a querier to be treated in a best effort manner.
@@ -47,28 +49,28 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
 	return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
 }
 
-func (s *secondaryQuerier) LabelValues(name string) ([]string, Warnings, error) {
-	vals, w, err := s.genericQuerier.LabelValues(name)
+func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...)
 	if err != nil {
-		return nil, append([]error{err}, w...), nil
+		return nil, w.Add(err), nil
 	}
 	return vals, w, nil
 }
 
-func (s *secondaryQuerier) LabelNames() ([]string, Warnings, error) {
-	names, w, err := s.genericQuerier.LabelNames()
+func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...)
 	if err != nil {
-		return nil, append([]error{err}, w...), nil
+		return nil, w.Add(err), nil
 	}
 	return names, w, nil
 }
 
-func (s *secondaryQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
+func (s *secondaryQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
 	if s.done {
 		panic("secondaryQuerier: Select invoked after first Next of any returned SeriesSet was done")
 	}
 
-	s.asyncSets = append(s.asyncSets, s.genericQuerier.Select(sortSeries, hints, matchers...))
+	s.asyncSets = append(s.asyncSets, s.genericQuerier.Select(ctx, sortSeries, hints, matchers...))
 	curr := len(s.asyncSets) - 1
 	return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) {
 		s.once.Do(func() {
@@ -82,7 +84,7 @@ func (s *secondaryQuerier) Select(sortSeries bool, hints *SelectHints, matchers
 				if err := set.Err(); err != nil {
 					// One of the sets failed, ensure current one returning errors as warnings, and rest of the sets return nothing.
 					// (All or nothing logic).
-					s.asyncSets[curr] = warningsOnlySeriesSet(append([]error{err}, ws...))
+					s.asyncSets[curr] = warningsOnlySeriesSet(ws.Add(err))
 					for i := range s.asyncSets {
 						if curr == i {
 							continue
diff --git a/vendor/github.com/prometheus/prometheus/storage/series.go b/vendor/github.com/prometheus/prometheus/storage/series.go
index c5440a45a9b99f99d883a27a6d3d04c03a6f6535..e61b2259370188a321fb886639816f7f4f41f764 100644
--- a/vendor/github.com/prometheus/prometheus/storage/series.go
+++ b/vendor/github.com/prometheus/prometheus/storage/series.go
@@ -14,50 +14,78 @@
 package storage
 
 import (
+	"fmt"
 	"math"
 	"sort"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )
 
 type SeriesEntry struct {
 	Lset             labels.Labels
-	SampleIteratorFn func() chunkenc.Iterator
+	SampleIteratorFn func(chunkenc.Iterator) chunkenc.Iterator
 }
 
-func (s *SeriesEntry) Labels() labels.Labels       { return s.Lset }
-func (s *SeriesEntry) Iterator() chunkenc.Iterator { return s.SampleIteratorFn() }
+func (s *SeriesEntry) Labels() labels.Labels                           { return s.Lset }
+func (s *SeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator { return s.SampleIteratorFn(it) }
 
 type ChunkSeriesEntry struct {
 	Lset            labels.Labels
-	ChunkIteratorFn func() chunks.Iterator
+	ChunkIteratorFn func(chunks.Iterator) chunks.Iterator
 }
 
-func (s *ChunkSeriesEntry) Labels() labels.Labels     { return s.Lset }
-func (s *ChunkSeriesEntry) Iterator() chunks.Iterator { return s.ChunkIteratorFn() }
+func (s *ChunkSeriesEntry) Labels() labels.Labels                       { return s.Lset }
+func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) }
 
 // NewListSeries returns a series entry with an iterator that allows iterating over the provided samples.
-func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
+func NewListSeries(lset labels.Labels, s []chunks.Sample) *SeriesEntry {
+	samplesS := Samples(samples(s))
 	return &SeriesEntry{
 		Lset: lset,
-		SampleIteratorFn: func() chunkenc.Iterator {
-			return NewListSeriesIterator(samples(s))
+		SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
+			if lsi, ok := it.(*listSeriesIterator); ok {
+				lsi.Reset(samplesS)
+				return lsi
+			}
+			return NewListSeriesIterator(samplesS)
 		},
 	}
 }
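+
+// A reuse sketch (not part of the upstream change; lset and smpls are
+// hypothetical):
+//
+//	s := NewListSeries(lset, smpls)
+//	it := s.Iterator(nil) // Allocates a fresh listSeriesIterator.
+//	it = s.Iterator(it)   // Resets and reuses the same iterator.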
 
-// NewListChunkSeriesFromSamples returns chunk series entry that allows to iterate over provided samples.
-// NOTE: It uses inefficient chunks encoding implementation, not caring about chunk size.
-func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry {
+// NewListChunkSeriesFromSamples returns a chunk series entry that allows iterating over the provided samples.
+// NOTE: It uses an inefficient chunks encoding implementation, not caring about chunk size.
+// Use only for testing.
+func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sample) *ChunkSeriesEntry {
+	chksFromSamples := make([]chunks.Meta, 0, len(samples))
+	for _, s := range samples {
+		cfs, err := chunks.ChunkFromSamples(s)
+		if err != nil {
+			return &ChunkSeriesEntry{
+				Lset: lset,
+				ChunkIteratorFn: func(_ chunks.Iterator) chunks.Iterator {
+					return errChunksIterator{err: err}
+				},
+			}
+		}
+		chksFromSamples = append(chksFromSamples, cfs)
+	}
 	return &ChunkSeriesEntry{
 		Lset: lset,
-		ChunkIteratorFn: func() chunks.Iterator {
-			chks := make([]chunks.Meta, 0, len(samples))
-			for _, s := range samples {
-				chks = append(chks, tsdbutil.ChunkFromSamples(s))
+		ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator {
+			lcsi, existing := it.(*listChunkSeriesIterator)
+			var chks []chunks.Meta
+			if existing {
+				chks = lcsi.chks[:0]
+			} else {
+				chks = make([]chunks.Meta, 0, len(samples))
+			}
+			chks = append(chks, chksFromSamples...)
+			if existing {
+				lcsi.Reset(chks...)
+				return lcsi
 			}
 			return NewListChunkSeriesIterator(chks...)
 		},
@@ -69,14 +97,14 @@ type listSeriesIterator struct {
 	idx     int
 }
 
-type samples []tsdbutil.Sample
+type samples []chunks.Sample
 
-func (s samples) Get(i int) tsdbutil.Sample { return s[i] }
-func (s samples) Len() int                  { return len(s) }
+func (s samples) Get(i int) chunks.Sample { return s[i] }
+func (s samples) Len() int                { return len(s) }
 
-// Samples interface allows to work on arrays of types that are compatible with tsdbutil.Sample.
+// Samples interface allows working with arrays of types that are compatible with chunks.Sample.
 type Samples interface {
-	Get(i int) tsdbutil.Sample
+	Get(i int) chunks.Sample
 	Len() int
 }
 
@@ -85,31 +113,92 @@ func NewListSeriesIterator(samples Samples) chunkenc.Iterator {
 	return &listSeriesIterator{samples: samples, idx: -1}
 }
 
+func (it *listSeriesIterator) Reset(samples Samples) {
+	it.samples = samples
+	it.idx = -1
+}
+
 func (it *listSeriesIterator) At() (int64, float64) {
 	s := it.samples.Get(it.idx)
-	return s.T(), s.V()
+	return s.T(), s.F()
+}
+
+func (it *listSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+	s := it.samples.Get(it.idx)
+	return s.T(), s.H()
+}
+
+func (it *listSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	s := it.samples.Get(it.idx)
+	return s.T(), s.FH()
+}
+
+func (it *listSeriesIterator) AtT() int64 {
+	s := it.samples.Get(it.idx)
+	return s.T()
 }
 
-func (it *listSeriesIterator) Next() bool {
+func (it *listSeriesIterator) Next() chunkenc.ValueType {
 	it.idx++
-	return it.idx < it.samples.Len()
+	if it.idx >= it.samples.Len() {
+		return chunkenc.ValNone
+	}
+	return it.samples.Get(it.idx).Type()
 }
 
-func (it *listSeriesIterator) Seek(t int64) bool {
+func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	if it.idx == -1 {
 		it.idx = 0
 	}
+	if it.idx >= it.samples.Len() {
+		return chunkenc.ValNone
+	}
+	// No-op check.
+	if s := it.samples.Get(it.idx); s.T() >= t {
+		return s.Type()
+	}
 	// Do binary search between current position and end.
-	it.idx = sort.Search(it.samples.Len()-it.idx, func(i int) bool {
+	it.idx += sort.Search(it.samples.Len()-it.idx, func(i int) bool {
 		s := it.samples.Get(i + it.idx)
 		return s.T() >= t
 	})
 
-	return it.idx < it.samples.Len()
+	if it.idx >= it.samples.Len() {
+		return chunkenc.ValNone
+	}
+	return it.samples.Get(it.idx).Type()
 }
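+
+// For example, on samples at t=10, 20, 30, Seek(15) lands on t=20, and a
+// subsequent Seek(5) is a no-op because the iterator never moves backwards.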
 
 func (it *listSeriesIterator) Err() error { return nil }
 
+type listSeriesIteratorWithCopy struct {
+	*listSeriesIterator
+}
+
+func NewListSeriesIteratorWithCopy(samples Samples) chunkenc.Iterator {
+	return &listSeriesIteratorWithCopy{
+		listSeriesIterator: &listSeriesIterator{samples: samples, idx: -1},
+	}
+}
+
+func (it *listSeriesIteratorWithCopy) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
+	t, ih := it.listSeriesIterator.AtHistogram(nil)
+	if h == nil || ih == nil {
+		return t, ih
+	}
+	ih.CopyTo(h)
+	return t, h
+}
+
+func (it *listSeriesIteratorWithCopy) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	t, ih := it.listSeriesIterator.AtFloatHistogram(nil)
+	if fh == nil || ih == nil {
+		return t, ih
+	}
+	ih.CopyTo(fh)
+	return t, fh
+}
+
 type listChunkSeriesIterator struct {
 	chks []chunks.Meta
 	idx  int
@@ -120,6 +209,11 @@ func NewListChunkSeriesIterator(chks ...chunks.Meta) chunks.Iterator {
 	return &listChunkSeriesIterator{chks: chks, idx: -1}
 }
 
+func (it *listChunkSeriesIterator) Reset(chks ...chunks.Meta) {
+	it.chks = chks
+	it.idx = -1
+}
+
 func (it *listChunkSeriesIterator) At() chunks.Meta {
 	return it.chks[it.idx]
 }
@@ -134,6 +228,7 @@ func (it *listChunkSeriesIterator) Err() error { return nil }
 type chunkSetToSeriesSet struct {
 	ChunkSeriesSet
 
+	iter             chunks.Iterator
 	chkIterErr       error
 	sameSeriesChunks []Series
 }
@@ -148,18 +243,18 @@ func (c *chunkSetToSeriesSet) Next() bool {
 		return false
 	}
 
-	iter := c.ChunkSeriesSet.At().Iterator()
-	c.sameSeriesChunks = c.sameSeriesChunks[:0]
+	c.iter = c.ChunkSeriesSet.At().Iterator(c.iter)
+	c.sameSeriesChunks = nil
 
-	for iter.Next() {
+	for c.iter.Next() {
 		c.sameSeriesChunks = append(
 			c.sameSeriesChunks,
-			newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), iter.At()),
+			newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), c.iter.At()),
 		)
 	}
 
-	if iter.Err() != nil {
-		c.chkIterErr = iter.Err()
+	if c.iter.Err() != nil {
+		c.chkIterErr = c.iter.Err()
 		return false
 	}
 	return true
@@ -180,9 +275,9 @@ func (c *chunkSetToSeriesSet) Err() error {
 func newChunkToSeriesDecoder(labels labels.Labels, chk chunks.Meta) Series {
 	return &SeriesEntry{
 		Lset: labels,
-		SampleIteratorFn: func() chunkenc.Iterator {
+		SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
 			// TODO(bwplotka): Can we provide any chunkenc buffer?
-			return chk.Chunk.Iterator(nil)
+			return chk.Chunk.Iterator(it)
 		},
 	}
 }
@@ -204,9 +299,7 @@ func (c *seriesSetToChunkSet) Next() bool {
 }
 
 func (c *seriesSetToChunkSet) At() ChunkSeries {
-	return &seriesToChunkEncoder{
-		Series: c.SeriesSet.At(),
-	}
+	return NewSeriesToChunkEncoder(c.SeriesSet.At())
 }
 
 func (c *seriesSetToChunkSet) Err() error {
@@ -217,35 +310,122 @@ type seriesToChunkEncoder struct {
 	Series
 }
 
-// TODO(bwplotka): Currently encoder will just naively build one chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
-func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
-	chk := chunkenc.NewXORChunk()
-	app, err := chk.Appender()
-	if err != nil {
-		return errChunksIterator{err: err}
-	}
+const seriesToChunkEncoderSplit = 120
+
+// NewSeriesToChunkEncoder encodes samples into chunks with a limit of 120 samples per chunk.
+func NewSeriesToChunkEncoder(series Series) ChunkSeries {
+	return &seriesToChunkEncoder{series}
+}
+
+func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
+	var (
+		chk, newChk chunkenc.Chunk
+		app         chunkenc.Appender
+		err         error
+		recoded     bool
+	)
 	mint := int64(math.MaxInt64)
 	maxt := int64(math.MinInt64)
 
-	seriesIter := s.Series.Iterator()
-	for seriesIter.Next() {
-		t, v := seriesIter.At()
-		app.Append(t, v)
+	var chks []chunks.Meta
+	lcsi, existing := it.(*listChunkSeriesIterator)
+	if existing {
+		chks = lcsi.chks[:0]
+	}
+
+	i := 0
+	seriesIter := s.Series.Iterator(nil)
+	lastType := chunkenc.ValNone
+	for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
+		if typ != lastType || i >= seriesToChunkEncoderSplit {
+			// Create a new chunk if the sample type changed or there are too many samples in the current one.
+			chks = appendChunk(chks, mint, maxt, chk)
+			chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding())
+			if err != nil {
+				return errChunksIterator{err: err}
+			}
+			app, err = chk.Appender()
+			if err != nil {
+				return errChunksIterator{err: err}
+			}
+			mint = int64(math.MaxInt64)
+			// maxt is immediately overwritten below which is why setting it here won't make a difference.
+			i = 0
+		}
+		lastType = typ
+
+		var (
+			t  int64
+			v  float64
+			h  *histogram.Histogram
+			fh *histogram.FloatHistogram
+		)
+		switch typ {
+		case chunkenc.ValFloat:
+			t, v = seriesIter.At()
+			app.Append(t, v)
+		case chunkenc.ValHistogram:
+			t, h = seriesIter.AtHistogram(nil)
+			newChk, recoded, app, err = app.AppendHistogram(nil, t, h, false)
+			if err != nil {
+				return errChunksIterator{err: err}
+			}
+			if newChk != nil {
+				if !recoded {
+					chks = appendChunk(chks, mint, maxt, chk)
+					mint = int64(math.MaxInt64)
+					// maxt is immediately overwritten below which is why setting it here won't make a difference.
+					i = 0
+				}
+				chk = newChk
+			}
+		case chunkenc.ValFloatHistogram:
+			t, fh = seriesIter.AtFloatHistogram(nil)
+			newChk, recoded, app, err = app.AppendFloatHistogram(nil, t, fh, false)
+			if err != nil {
+				return errChunksIterator{err: err}
+			}
+			if newChk != nil {
+				if !recoded {
+					chks = appendChunk(chks, mint, maxt, chk)
+					mint = int64(math.MaxInt64)
+					// maxt is immediately overwritten below which is why setting it here won't make a difference.
+					i = 0
+				}
+				chk = newChk
+			}
+		default:
+			return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
+		}
 
 		maxt = t
 		if mint == math.MaxInt64 {
 			mint = t
 		}
+		i++
 	}
 	if err := seriesIter.Err(); err != nil {
 		return errChunksIterator{err: err}
 	}
 
-	return NewListChunkSeriesIterator(chunks.Meta{
-		MinTime: mint,
-		MaxTime: maxt,
-		Chunk:   chk,
-	})
+	chks = appendChunk(chks, mint, maxt, chk)
+
+	if existing {
+		lcsi.Reset(chks...)
+		return lcsi
+	}
+	return NewListChunkSeriesIterator(chks...)
+}
+
+func appendChunk(chks []chunks.Meta, mint, maxt int64, chk chunkenc.Chunk) []chunks.Meta {
+	if chk != nil {
+		chks = append(chks, chunks.Meta{
+			MinTime: mint,
+			MaxTime: maxt,
+			Chunk:   chk,
+		})
+	}
+	return chks
 }
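+
+// An illustrative outcome (not part of the upstream change, assuming no
+// histogram recoding occurs): encoding 130 float samples followed by 5
+// histogram samples yields three chunks: an XOR chunk with 120 samples, an
+// XOR chunk with the remaining 10, and a histogram chunk with 5 samples,
+// since a split happens every seriesToChunkEncoderSplit samples and at each
+// sample-type change.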
 
 type errChunksIterator struct {
@@ -259,21 +439,40 @@ func (e errChunksIterator) Err() error      { return e.err }
 // ExpandSamples iterates over all samples in the iterator, buffering them all in a slice.
 // Optionally it takes a sample constructor, useful when you want to compare sample slices with different
 // sample implementations. If nil, the sample type from this package will be used.
-func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
+func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) {
 	if newSampleFn == nil {
-		newSampleFn = func(t int64, v float64) tsdbutil.Sample { return sample{t, v} }
+		newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample {
+			switch {
+			case h != nil:
+				return hSample{t, h}
+			case fh != nil:
+				return fhSample{t, fh}
+			default:
+				return fSample{t, f}
+			}
+		}
 	}
 
-	var result []tsdbutil.Sample
-	for iter.Next() {
-		t, v := iter.At()
-		// NaNs can't be compared normally, so substitute for another value.
-		if math.IsNaN(v) {
-			v = -42
+	var result []chunks.Sample
+	for {
+		switch iter.Next() {
+		case chunkenc.ValNone:
+			return result, iter.Err()
+		case chunkenc.ValFloat:
+			t, f := iter.At()
+			// NaNs can't be compared normally, so substitute for another value.
+			if math.IsNaN(f) {
+				f = -42
+			}
+			result = append(result, newSampleFn(t, f, nil, nil))
+		case chunkenc.ValHistogram:
+			t, h := iter.AtHistogram(nil)
+			result = append(result, newSampleFn(t, 0, h, nil))
+		case chunkenc.ValFloatHistogram:
+			t, fh := iter.AtFloatHistogram(nil)
+			result = append(result, newSampleFn(t, 0, nil, fh))
 		}
-		result = append(result, newSampleFn(t, v))
 	}
-	return result, iter.Err()
 }
 
 // ExpandChunks iterates over all chunks in the iterator, buffering all in slice.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go
index ad8077c27e9f36918be0c5c123da6a3d4938a1dc..6e01798f7202d5ec17b6f9b8a87f2d2c117a0596 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go
@@ -48,8 +48,14 @@ import (
 
 // bstream is a stream of bits.
 type bstream struct {
-	stream []byte // the data stream
-	count  uint8  // how many bits are valid in current byte
+	stream []byte // The data stream.
+	count  uint8  // How many right-most bits are available for writing in the current byte (the last byte of the stream).
+}
+
+// Reset resets b around stream.
+func (b *bstream) Reset(stream []byte) {
+	b.stream = stream
+	b.count = 0
 }
 
 func (b *bstream) bytes() []byte {
@@ -80,22 +86,23 @@ func (b *bstream) writeBit(bit bit) {
 
 func (b *bstream) writeByte(byt byte) {
 	if b.count == 0 {
-		b.stream = append(b.stream, 0)
-		b.count = 8
+		b.stream = append(b.stream, byt)
+		return
 	}
 
 	i := len(b.stream) - 1
 
-	// fill up b.b with b.count bits from byt
+	// Complete the last byte with the leftmost b.count bits from byt.
 	b.stream[i] |= byt >> (8 - b.count)
 
-	b.stream = append(b.stream, 0)
-	i++
-	b.stream[i] = byt << b.count
+	// Write the remainder, if any.
+	b.stream = append(b.stream, byt<<b.count)
 }
 
+// writeBits writes the nbits right-most bits of u to the stream
+// in left-to-right order.
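+// For example, writeBits(0b101, 3) appends the bits 1, 0, 1 to the stream.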
 func (b *bstream) writeBits(u uint64, nbits int) {
-	u <<= (64 - uint(nbits))
+	u <<= 64 - uint(nbits)
 	for nbits >= 8 {
 		byt := byte(u >> 56)
 		b.writeByte(byt)
@@ -115,12 +122,19 @@ type bstreamReader struct {
 	streamOffset int // The offset from which read the next byte from the stream.
 
 	buffer uint64 // The current buffer, filled from the stream, containing up to 8 bytes from which read bits.
-	valid  uint8  // The number of bits valid to read (from left) in the current buffer.
+	valid  uint8  // The number of right-most bits valid to read (from left) in the current 8-byte buffer.
+	last   byte   // A copy of the last byte of the stream.
 }
 
 func newBReader(b []byte) bstreamReader {
+	// The last byte of the stream can be updated later, so we take a copy.
+	var last byte
+	if len(b) > 0 {
+		last = b[len(b)-1]
+	}
 	return bstreamReader{
 		stream: b,
+		last:   last,
 	}
 }
 
@@ -148,6 +162,8 @@ func (b *bstreamReader) readBitFast() (bit, error) {
 	return (b.buffer & bitmask) != 0, nil
 }
 
+// readBits constructs a uint64 with the nbits right-most bits
+// read from the stream, and any other bits 0.
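+// For example, if the next three bits in the stream are 1, 0, 1, then
+// readBits(3) returns 0b101, i.e. 5.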
 func (b *bstreamReader) readBits(nbits uint8) (uint64, error) {
 	if b.valid == 0 {
 		if !b.loadNextBuffer(nbits) {
@@ -170,7 +186,7 @@ func (b *bstreamReader) readBits(nbits uint8) (uint64, error) {
 	}
 
 	bitmask = (uint64(1) << nbits) - 1
-	v = v | ((b.buffer >> (b.valid - nbits)) & bitmask)
+	v |= ((b.buffer >> (b.valid - nbits)) & bitmask)
 	b.valid -= nbits
 
 	return v, nil
@@ -218,18 +234,25 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool {
 		return true
 	}
 
-	// We're here if the are 8 or less bytes left in the stream. Since this reader needs
-	// to handle race conditions with concurrent writes happening on the very last byte
-	// we make sure to never over more than the minimum requested bits (rounded up to
-	// the next byte). The following code is slower but called less frequently.
+	// We're here if there are 8 or less bytes left in the stream.
+	// The following code is slower but called less frequently.
 	nbytes := int((nbits / 8) + 1)
 	if b.streamOffset+nbytes > len(b.stream) {
 		nbytes = len(b.stream) - b.streamOffset
 	}
 
 	buffer := uint64(0)
-	for i := 0; i < nbytes; i++ {
-		buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
+	skip := 0
+	if b.streamOffset+nbytes == len(b.stream) {
+		// There can be concurrent writes happening on the very last byte
+		// of the stream, so use the copy we took at initialization time.
+		buffer |= uint64(b.last)
+		// Read up to the byte before the last one.
+		skip = 1
+	}
+
+	for i := 0; i < nbytes-skip; i++ {
+		buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
 	}
 
 	b.buffer = buffer
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go
index fa67c1cf4c5993280abd3a84c9dffc02717ccb61..7082f34c3f48d6570b9515e3d394d775308b2af3 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go
@@ -18,30 +18,57 @@ import (
 	"math"
 	"sync"
 
-	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/model/histogram"
 )
 
 // Encoding is the identifier for a chunk encoding.
 type Encoding uint8
 
+// The different available chunk encodings.
+const (
+	EncNone Encoding = iota
+	EncXOR
+	EncHistogram
+	EncFloatHistogram
+)
+
 func (e Encoding) String() string {
 	switch e {
 	case EncNone:
 		return "none"
 	case EncXOR:
 		return "XOR"
+	case EncHistogram:
+		return "histogram"
+	case EncFloatHistogram:
+		return "floathistogram"
 	}
 	return "<unknown>"
 }
 
-// The different available chunk encodings.
+// IsValidEncoding returns true for supported encodings.
+func IsValidEncoding(e Encoding) bool {
+	return e == EncXOR || e == EncHistogram || e == EncFloatHistogram
+}
+
 const (
-	EncNone Encoding = iota
-	EncXOR
+	// MaxBytesPerXORChunk is the maximum size an XOR chunk can be.
+	MaxBytesPerXORChunk = 1024
+	// TargetBytesPerHistogramChunk sets a size target for each histogram chunk.
+	TargetBytesPerHistogramChunk = 1024
+	// MinSamplesPerHistogramChunk sets a minimum sample count for histogram chunks. This is desirable because a single
+	// histogram sample can be larger than TargetBytesPerHistogramChunk but we want to avoid too-small sample count
+	// chunks so we can achieve some measure of compression advantage even while dealing with really large histograms.
+	// Note that this minimum sample count is not enforced across chunk range boundaries (for example, if the chunk
+	// range is 100 and the first sample in the chunk range is 99, the next sample will be included in a new chunk
+	// resulting in the old chunk containing only a single sample).
+	MinSamplesPerHistogramChunk = 10
 )
 
 // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
 type Chunk interface {
+	Iterable
+
 	// Bytes returns the underlying byte slice of the chunk.
 	Bytes() []byte
 
@@ -51,11 +78,6 @@ type Chunk interface {
 	// Appender returns an appender to append samples to the chunk.
 	Appender() (Appender, error)
 
-	// The iterator passed as argument is for re-use.
-	// Depending on implementation, the iterator can
-	// be re-used or a new iterator can be allocated.
-	Iterator(Iterator) Iterator
-
 	// NumSamples returns the number of samples in the chunk.
 	NumSamples() int
 
@@ -65,31 +87,170 @@ type Chunk interface {
 	// There's no strong guarantee that no samples will be appended once
 	// Compact() is called. Implementing this function is optional.
 	Compact()
+
+	// Reset resets the chunk given stream.
+	Reset(stream []byte)
+}
+
+type Iterable interface {
+	// The iterator passed as argument is for re-use.
+	// Depending on implementation, the iterator can
+	// be re-used or a new iterator can be allocated.
+	Iterator(Iterator) Iterator
 }
 
 // Appender adds sample pairs to a chunk.
 type Appender interface {
 	Append(int64, float64)
+
+	// AppendHistogram and AppendFloatHistogram append a histogram sample to a histogram or float histogram chunk.
+	// Appending a histogram may require creating a completely new chunk or recoding (changing) the current chunk.
+	// The Appender prev is used to determine if there is a counter reset between the previous Appender and the current Appender.
+	// The Appender prev is optional and only taken into account when the first sample is being appended.
+	// The bool appendOnly governs what happens when a sample cannot be appended to the current chunk. If appendOnly is true, then
+	// in such case an error is returned without modifying the chunk. If appendOnly is false, then a new chunk is created or the
+	// current chunk is recoded to accommodate the sample.
+	// The returned Chunk c is nil if sample could be appended to the current Chunk, otherwise c is the new Chunk.
+	// The returned bool isRecoded can be used to distinguish between the new Chunk c being a completely new Chunk
+	// or the current Chunk recoded to a new Chunk.
+	// The Appender app that can be used for the next append is always returned.
+	AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
+	AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
 }
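+
+// An illustrative append loop (not part of the upstream change; chk, t and h
+// are hypothetical):
+//
+//	app, _ := chk.Appender()
+//	newChk, recoded, app, err := app.AppendHistogram(nil, t, h, false)
+//	if err != nil {
+//		// Handle the error.
+//	}
+//	if newChk != nil && !recoded {
+//		// h did not fit: newChk is a completely new chunk holding the sample.
+//	}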
 
 // Iterator is a simple iterator that can only get the next value.
 // Iterator iterates over the samples of a time series, in timestamp-increasing order.
 type Iterator interface {
-	// Next advances the iterator by one.
-	Next() bool
-	// Seek advances the iterator forward to the first sample with the timestamp equal or greater than t.
-	// If current sample found by previous `Next` or `Seek` operation already has this property, Seek has no effect.
-	// Seek returns true, if such sample exists, false otherwise.
-	// Iterator is exhausted when the Seek returns false.
-	Seek(t int64) bool
-	// At returns the current timestamp/value pair.
-	// Before the iterator has advanced At behaviour is unspecified.
+	// Next advances the iterator by one and returns the type of the value
+	// at the new position (or ValNone if the iterator is exhausted).
+	Next() ValueType
+	// Seek advances the iterator forward to the first sample with a
+	// timestamp equal or greater than t. If the current sample found by a
+	// previous `Next` or `Seek` operation already has this property, Seek
+	// has no effect. If a sample has been found, Seek returns the type of
+	// its value. Otherwise, it returns ValNone, after which the iterator is
+	// exhausted.
+	Seek(t int64) ValueType
+	// At returns the current timestamp/value pair if the value is a float.
+	// Before the iterator has advanced, the behaviour is unspecified.
 	At() (int64, float64)
-	// Err returns the current error. It should be used only after iterator is
-	// exhausted, that is `Next` or `Seek` returns false.
+	// AtHistogram returns the current timestamp/value pair if the value is a
+	// histogram with integer counts. Before the iterator has advanced, the behaviour
+	// is unspecified.
+	// The method accepts an optional Histogram object which will be
+	// reused when not nil. Otherwise, a new Histogram object will be allocated.
+	AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram)
+	// AtFloatHistogram returns the current timestamp/value pair if the
+	// value is a histogram with floating-point counts. It also works if the
+	// value is a histogram with integer counts, in which case a
+	// FloatHistogram copy of the histogram is returned. Before the iterator
+	// has advanced, the behaviour is unspecified.
+	// The method accepts an optional FloatHistogram object which will be
+	// reused when not nil. Otherwise, a new FloatHistogram object will be allocated.
+	AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram)
+	// AtT returns the current timestamp.
+	// Before the iterator has advanced, the behaviour is unspecified.
+	AtT() int64
+	// Err returns the current error. It should be used only after the
+	// iterator is exhausted, i.e. `Next` or `Seek` have returned ValNone.
 	Err() error
 }
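+
+// A typical consumption loop (not part of the upstream change):
+//
+//	for typ := it.Next(); typ != ValNone; typ = it.Next() {
+//		switch typ {
+//		case ValFloat:
+//			t, v := it.At()
+//			_, _ = t, v
+//		case ValHistogram:
+//			t, h := it.AtHistogram(nil)
+//			_, _ = t, h
+//		case ValFloatHistogram:
+//			t, fh := it.AtFloatHistogram(nil)
+//			_, _ = t, fh
+//		}
+//	}
+//	// Inspect it.Err() only once the iterator is exhausted.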
 
+// ValueType defines the type of a value an Iterator points to.
+type ValueType uint8
+
+// Possible values for ValueType.
+const (
+	ValNone           ValueType = iota // No value at the current position.
+	ValFloat                           // A simple float, retrieved with At.
+	ValHistogram                       // A histogram, retrieved with AtHistogram, but AtFloatHistogram works, too.
+	ValFloatHistogram                  // A floating-point histogram, retrieved with AtFloatHistogram.
+)
+
+func (v ValueType) String() string {
+	switch v {
+	case ValNone:
+		return "none"
+	case ValFloat:
+		return "float"
+	case ValHistogram:
+		return "histogram"
+	case ValFloatHistogram:
+		return "floathistogram"
+	default:
+		return "unknown"
+	}
+}
+
+func (v ValueType) ChunkEncoding() Encoding {
+	switch v {
+	case ValFloat:
+		return EncXOR
+	case ValHistogram:
+		return EncHistogram
+	case ValFloatHistogram:
+		return EncFloatHistogram
+	default:
+		return EncNone
+	}
+}
+
+func (v ValueType) NewChunk() (Chunk, error) {
+	switch v {
+	case ValFloat:
+		return NewXORChunk(), nil
+	case ValHistogram:
+		return NewHistogramChunk(), nil
+	case ValFloatHistogram:
+		return NewFloatHistogramChunk(), nil
+	default:
+		return nil, fmt.Errorf("value type %v unsupported", v)
+	}
+}
+
+// MockSeriesIterator returns an iterator for a mock series with custom timestamps and values.
+func MockSeriesIterator(timestamps []int64, values []float64) Iterator {
+	return &mockSeriesIterator{
+		timeStamps: timestamps,
+		values:     values,
+		currIndex:  -1,
+	}
+}
+
+type mockSeriesIterator struct {
+	timeStamps []int64
+	values     []float64
+	currIndex  int
+}
+
+func (it *mockSeriesIterator) Seek(int64) ValueType { return ValNone }
+
+func (it *mockSeriesIterator) At() (int64, float64) {
+	return it.timeStamps[it.currIndex], it.values[it.currIndex]
+}
+
+func (it *mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+	return math.MinInt64, nil
+}
+
+func (it *mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	return math.MinInt64, nil
+}
+
+func (it *mockSeriesIterator) AtT() int64 {
+	return it.timeStamps[it.currIndex]
+}
+
+func (it *mockSeriesIterator) Next() ValueType {
+	if it.currIndex < len(it.timeStamps)-1 {
+		it.currIndex++
+		return ValFloat
+	}
+
+	return ValNone
+}
+func (it *mockSeriesIterator) Err() error { return nil }
+
 // NewNopIterator returns a new chunk iterator that does not hold any data.
 func NewNopIterator() Iterator {
 	return nopIterator{}
@@ -97,10 +258,18 @@ func NewNopIterator() Iterator {
 
 type nopIterator struct{}
 
-func (nopIterator) Seek(int64) bool      { return false }
+func (nopIterator) Next() ValueType      { return ValNone }
+func (nopIterator) Seek(int64) ValueType { return ValNone }
 func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 }
-func (nopIterator) Next() bool           { return false }
-func (nopIterator) Err() error           { return nil }
+func (nopIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+	return math.MinInt64, nil
+}
+
+func (nopIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	return math.MinInt64, nil
+}
+func (nopIterator) AtT() int64 { return math.MinInt64 }
+func (nopIterator) Err() error { return nil }
 
 // Pool is used to create and reuse chunk references to avoid allocations.
 type Pool interface {
@@ -110,7 +279,9 @@ type Pool interface {
 
 // pool is a memory pool of chunk objects.
 type pool struct {
-	xor sync.Pool
+	xor            sync.Pool
+	histogram      sync.Pool
+	floatHistogram sync.Pool
 }
 
 // NewPool returns a new pool.
@@ -121,36 +292,61 @@ func NewPool() Pool {
 				return &XORChunk{b: bstream{}}
 			},
 		},
+		histogram: sync.Pool{
+			New: func() interface{} {
+				return &HistogramChunk{b: bstream{}}
+			},
+		},
+		floatHistogram: sync.Pool{
+			New: func() interface{} {
+				return &FloatHistogramChunk{b: bstream{}}
+			},
+		},
 	}
 }
 
 func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
+	var c Chunk
 	switch e {
 	case EncXOR:
-		c := p.xor.Get().(*XORChunk)
-		c.b.stream = b
-		c.b.count = 0
-		return c, nil
+		c = p.xor.Get().(*XORChunk)
+	case EncHistogram:
+		c = p.histogram.Get().(*HistogramChunk)
+	case EncFloatHistogram:
+		c = p.floatHistogram.Get().(*FloatHistogramChunk)
+	default:
+		return nil, fmt.Errorf("invalid chunk encoding %q", e)
 	}
-	return nil, errors.Errorf("invalid encoding %q", e)
+
+	c.Reset(b)
+	return c, nil
 }
 
 func (p *pool) Put(c Chunk) error {
+	var sp *sync.Pool
+	var ok bool
 	switch c.Encoding() {
 	case EncXOR:
-		xc, ok := c.(*XORChunk)
+		_, ok = c.(*XORChunk)
+		sp = &p.xor
+	case EncHistogram:
+		_, ok = c.(*HistogramChunk)
+		sp = &p.histogram
+	case EncFloatHistogram:
+		_, ok = c.(*FloatHistogramChunk)
+		sp = &p.floatHistogram
+	default:
+		return fmt.Errorf("invalid chunk encoding %q", c.Encoding())
+	}
+	if !ok {
 		// This may happen often with wrapped chunks. Nothing we can really do about
 		// it, but returning an error would cause a lot of allocations again. Thus,
 		// we just skip it.
-		if !ok {
-			return nil
-		}
-		xc.b.stream = nil
-		xc.b.count = 0
-		p.xor.Put(c)
-	default:
-		return errors.Errorf("invalid encoding %q", c.Encoding())
+		return nil
 	}
+
+	c.Reset(nil)
+	sp.Put(c)
 	return nil
 }
 
@@ -161,6 +357,23 @@ func FromData(e Encoding, d []byte) (Chunk, error) {
 	switch e {
 	case EncXOR:
 		return &XORChunk{b: bstream{count: 0, stream: d}}, nil
+	case EncHistogram:
+		return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil
+	case EncFloatHistogram:
+		return &FloatHistogramChunk{b: bstream{count: 0, stream: d}}, nil
+	}
+	return nil, fmt.Errorf("invalid chunk encoding %q", e)
+}
+
+// NewEmptyChunk returns an empty chunk for the given encoding.
+func NewEmptyChunk(e Encoding) (Chunk, error) {
+	switch e {
+	case EncXOR:
+		return NewXORChunk(), nil
+	case EncHistogram:
+		return NewHistogramChunk(), nil
+	case EncFloatHistogram:
+		return NewFloatHistogramChunk(), nil
 	}
-	return nil, fmt.Errorf("unknown chunk encoding: %d", e)
+	return nil, fmt.Errorf("invalid chunk encoding %q", e)
 }
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go
new file mode 100644
index 0000000000000000000000000000000000000000..0da00dcda441edcc0f68c4ba0fe8337747f1140c
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go
@@ -0,0 +1,1165 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunkenc
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/value"
+)
+
+// FloatHistogramChunk holds encoded sample data for a sparse, high-resolution
+// float histogram.
+//
+// Each sample has multiple "fields", stored in the following way (raw = store
+// number directly, delta = store delta to the previous number, dod = store
+// delta of the delta to the previous number, xor = what we do for regular
+// sample values):
+//
+//	field →    ts    count zeroCount sum []posbuckets []negbuckets
+//	sample 1   raw   raw   raw       raw []raw        []raw
+//	sample 2   delta xor   xor       xor []xor        []xor
+//	sample >2  dod   xor   xor       xor []xor        []xor
+type FloatHistogramChunk struct {
+	b bstream
+}
+
+// NewFloatHistogramChunk returns a new chunk with float histogram encoding.
+func NewFloatHistogramChunk() *FloatHistogramChunk {
+	b := make([]byte, 3, 128)
+	return &FloatHistogramChunk{b: bstream{stream: b, count: 0}}
+}
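+
+// A minimal sketch of the chunk life cycle; appending and iterating are shown
+// in more detail further below:
+//
+//	chk := NewFloatHistogramChunk()
+//	app, err := chk.Appender() // concrete type is *FloatHistogramAppender
+//	// ... append samples via the appender ...
+//	n := chk.NumSamples() // samples encoded so far
+//	raw := chk.Bytes()    // encoded bytes, e.g. for persisting the chunk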
+
+func (c *FloatHistogramChunk) Reset(stream []byte) {
+	c.b.Reset(stream)
+}
+
+// xorValue holds all the necessary information to encode
+// and decode XOR encoded float64 values.
+type xorValue struct {
+	value    float64
+	leading  uint8
+	trailing uint8
+}
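+
+// A sketch of the underlying idea (using math and math/bits; not part of this
+// file's API): XOR-ing consecutive float64 bit patterns leaves mostly zero
+// bits when values change slowly, so only the window between the leading and
+// trailing zeros has to be written out.
+//
+//	prev, cur := math.Float64bits(3.0), math.Float64bits(3.5)
+//	x := prev ^ cur
+//	leading := uint8(bits.LeadingZeros64(x))   // zero bits before the window
+//	trailing := uint8(bits.TrailingZeros64(x)) // zero bits after the window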
+
+// Encoding returns the encoding type.
+func (c *FloatHistogramChunk) Encoding() Encoding {
+	return EncFloatHistogram
+}
+
+// Bytes returns the underlying byte slice of the chunk.
+func (c *FloatHistogramChunk) Bytes() []byte {
+	return c.b.bytes()
+}
+
+// NumSamples returns the number of samples in the chunk.
+func (c *FloatHistogramChunk) NumSamples() int {
+	return int(binary.BigEndian.Uint16(c.Bytes()))
+}
+
+// Layout returns the histogram layout. Only call this on chunks that have at
+// least one sample.
+func (c *FloatHistogramChunk) Layout() (
+	schema int32, zeroThreshold float64,
+	negativeSpans, positiveSpans []histogram.Span,
+	customValues []float64,
+	err error,
+) {
+	if c.NumSamples() == 0 {
+		panic("FloatHistogramChunk.Layout() called on an empty chunk")
+	}
+	b := newBReader(c.Bytes()[2:])
+	return readHistogramChunkLayout(&b)
+}
+
+// GetCounterResetHeader returns the info about the first 2 bits of the chunk
+// header.
+func (c *FloatHistogramChunk) GetCounterResetHeader() CounterResetHeader {
+	return CounterResetHeader(c.Bytes()[2] & CounterResetHeaderMask)
+}
+
+// Compact implements the Chunk interface.
+func (c *FloatHistogramChunk) Compact() {
+	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
+		buf := make([]byte, l)
+		copy(buf, c.b.stream)
+		c.b.stream = buf
+	}
+}
+
+// Appender implements the Chunk interface.
+func (c *FloatHistogramChunk) Appender() (Appender, error) {
+	it := c.iterator(nil)
+
+	// To get an appender, we must know the state it would have if we had
+	// appended all existing data from scratch. We iterate through to the end
+	// and populate via the iterator's state.
+	for it.Next() == ValFloatHistogram {
+	}
+	if err := it.Err(); err != nil {
+		return nil, err
+	}
+
+	pBuckets := make([]xorValue, len(it.pBuckets))
+	for i := 0; i < len(it.pBuckets); i++ {
+		pBuckets[i] = xorValue{
+			value:    it.pBuckets[i],
+			leading:  it.pBucketsLeading[i],
+			trailing: it.pBucketsTrailing[i],
+		}
+	}
+	nBuckets := make([]xorValue, len(it.nBuckets))
+	for i := 0; i < len(it.nBuckets); i++ {
+		nBuckets[i] = xorValue{
+			value:    it.nBuckets[i],
+			leading:  it.nBucketsLeading[i],
+			trailing: it.nBucketsTrailing[i],
+		}
+	}
+
+	a := &FloatHistogramAppender{
+		b: &c.b,
+
+		schema:       it.schema,
+		zThreshold:   it.zThreshold,
+		pSpans:       it.pSpans,
+		nSpans:       it.nSpans,
+		customValues: it.customValues,
+		t:            it.t,
+		tDelta:       it.tDelta,
+		cnt:          it.cnt,
+		zCnt:         it.zCnt,
+		pBuckets:     pBuckets,
+		nBuckets:     nBuckets,
+		sum:          it.sum,
+	}
+	if it.numTotal == 0 {
+		a.sum.leading = 0xff
+		a.cnt.leading = 0xff
+		a.zCnt.leading = 0xff
+	}
+	return a, nil
+}
+
+func (c *FloatHistogramChunk) iterator(it Iterator) *floatHistogramIterator {
+	// This comment is copied from XORChunk.iterator:
+	//   Should iterators guarantee to act on a copy of the data so it doesn't lock append?
+	//   When using striped locks to guard access to chunks, probably yes.
+	//   Could only copy data if the chunk is not completed yet.
+	if histogramIter, ok := it.(*floatHistogramIterator); ok {
+		histogramIter.Reset(c.b.bytes())
+		return histogramIter
+	}
+	return newFloatHistogramIterator(c.b.bytes())
+}
+
+func newFloatHistogramIterator(b []byte) *floatHistogramIterator {
+	it := &floatHistogramIterator{
+		br:       newBReader(b),
+		numTotal: binary.BigEndian.Uint16(b),
+		t:        math.MinInt64,
+	}
+	// The first 3 bytes contain chunk headers.
+	// We skip that for actual samples.
+	_, _ = it.br.readBits(24)
+	it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+	return it
+}
+
+// Iterator implements the Chunk interface.
+func (c *FloatHistogramChunk) Iterator(it Iterator) Iterator {
+	return c.iterator(it)
+}
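+
+// A typical read loop over a chunk chk might look like this (a sketch; pass a
+// previous iterator instead of nil to recycle it):
+//
+//	it := chk.Iterator(nil)
+//	for it.Next() == ValFloatHistogram {
+//		ts, fh := it.AtFloatHistogram(nil)
+//		// ... consume ts and fh ...
+//	}
+//	if err := it.Err(); err != nil {
+//		// handle a decoding error
+//	}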
+
+// FloatHistogramAppender is an Appender implementation for float histograms.
+type FloatHistogramAppender struct {
+	b *bstream
+
+	// Layout:
+	schema         int32
+	zThreshold     float64
+	pSpans, nSpans []histogram.Span
+	customValues   []float64
+
+	t, tDelta          int64
+	sum, cnt, zCnt     xorValue
+	pBuckets, nBuckets []xorValue
+}
+
+func (a *FloatHistogramAppender) GetCounterResetHeader() CounterResetHeader {
+	return CounterResetHeader(a.b.bytes()[2] & CounterResetHeaderMask)
+}
+
+func (a *FloatHistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
+	a.b.bytes()[2] = (a.b.bytes()[2] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
+}
+
+func (a *FloatHistogramAppender) NumSamples() int {
+	return int(binary.BigEndian.Uint16(a.b.bytes()))
+}
+
+// Append implements Appender. This implementation panics because normal float
+// samples must never be appended to a histogram chunk.
+func (a *FloatHistogramAppender) Append(int64, float64) {
+	panic("appended a float sample to a histogram chunk")
+}
+
+// appendable returns whether the chunk can be appended to, and if so whether
+//  1. Any recoding needs to happen to the chunk using the provided forward
+//     inserts (in case of any new buckets, positive or negative range,
+//     respectively).
+//  2. Any recoding needs to happen for the histogram being appended, using the
+//     backward inserts (in case of any missing buckets, positive or negative
+//     range, respectively).
+//
+// If the sample is a gauge histogram, AppendableGauge must be used instead.
+//
+// The chunk is not appendable in the following cases:
+//
+//   - The schema has changed.
+//   - The custom bounds have changed if the current schema is custom buckets.
+//   - The threshold for the zero bucket has changed.
+//   - Any buckets have disappeared, unless the bucket count was 0 (i.e. the
+//     bucket was unused). Empty buckets can happen if the chunk was recoded
+//     and we're merging a non-recoded histogram. In this case backward inserts
+//     will be provided.
+//   - There was a counter reset in the count of observations or in any bucket,
+//     including the zero bucket.
+//   - The last sample in the chunk was stale while the current sample is not stale.
+//
+// The method returns an additional boolean set to true if it is not appendable
+// because of a counter reset. If the given sample is stale, it is always ok to
+// append. If counterReset is true, okToAppend is always false.
+func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
+	positiveInserts, negativeInserts []Insert,
+	backwardPositiveInserts, backwardNegativeInserts []Insert,
+	okToAppend, counterReset bool,
+) {
+	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
+		return
+	}
+	if h.CounterResetHint == histogram.CounterReset {
+		// Always honor the explicit counter reset hint.
+		counterReset = true
+		return
+	}
+	if value.IsStaleNaN(h.Sum) {
+		// This is a stale sample whose buckets and spans don't matter.
+		okToAppend = true
+		return
+	}
+	if value.IsStaleNaN(a.sum.value) {
+		// If the last sample was stale, then we can only accept stale
+		// samples in this chunk.
+		return
+	}
+
+	if h.Count < a.cnt.value {
+		// There has been a counter reset.
+		counterReset = true
+		return
+	}
+
+	if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
+		return
+	}
+
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		counterReset = true
+		return
+	}
+
+	if h.ZeroCount < a.zCnt.value {
+		// There has been a counter reset since ZeroThreshold didn't change.
+		counterReset = true
+		return
+	}
+
+	var ok bool
+	positiveInserts, backwardPositiveInserts, ok = expandFloatSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
+	if !ok {
+		counterReset = true
+		return
+	}
+	negativeInserts, backwardNegativeInserts, ok = expandFloatSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
+	if !ok {
+		counterReset = true
+		return
+	}
+
+	okToAppend = true
+	return
+}
+
+// expandFloatSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
+// they match the spans in 'b'. 'b' must cover the same or more buckets than
+// 'a', otherwise the function will return false.
+// The function also returns the inserts to expand 'b' to also cover all the
+// buckets that are missing in 'b', but are present with 0 counter value in 'a'.
+// The function also checks for counter resets between 'a' and 'b'.
+//
+// Example:
+//
+// Let's say the old buckets look like this:
+//
+//	span syntax: [offset, length]
+//	spans      : [ 0 , 2 ]               [2,1]                   [ 3 , 2 ]                     [3,1]       [1,1]
+//	bucket idx : [0]   [1]    2     3    [4]    5     6     7    [8]   [9]    10    11    12   [13]   14   [15]
+//	raw values    6     3                 3                       2     4                       5           1
+//	deltas        6    -3                 0                      -1     2                       1          -4
+//
+// But now we introduce a new bucket layout. (Carefully chosen example where we
+// have a span appended, one unchanged[*], one prepended, and two merged - in
+// that order.)
+//
+// [*] unchanged in terms of which bucket indices they represent, but to
+// achieve that, their offset needs to change if "disrupted" by spans changing
+// ahead of them.
+//
+//	                                      \/ this one is "unchanged"
+//	spans      : [  0  ,  3    ]         [1,1]       [    1    ,   4     ]                     [  3  ,   3    ]
+//	bucket idx : [0]   [1]   [2]    3    [4]    5    [6]   [7]   [8]   [9]    10    11    12   [13]  [14]  [15]
+//	raw values    6     3     0           3           0     0     2     4                       5     0     1
+//	deltas        6    -3    -3           3          -3     0     2     2                       1    -5     1
+//	delta mods:                          / \                     / \                                       / \
+//
+// Note for histograms with delta-encoded buckets: Whenever any new buckets are
+// introduced, the subsequent "old" bucket needs to readjust its delta to the
+// new base of 0. Thus, for the caller who wants to transform the set of
+// original deltas to a new set of deltas to match a new span layout that adds
+// buckets, we simply need to generate a list of inserts.
+//
+// Note: Within expandFloatSpansAndBuckets we don't have to worry about the
+// changes to the spans themselves, thanks to the iterators: we get to work
+// with the more useful bucket indices (which of course directly correspond to
+// the buckets we have to adjust).
+func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuckets []float64) (forward, backward []Insert, ok bool) {
+	ai := newBucketIterator(a)
+	bi := newBucketIterator(b)
+
+	var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
+	var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
+
+	// When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should
+	// be yielded when we finish a streak of new buckets.
+	var aInter Insert
+	var bInter Insert
+
+	aIdx, aOK := ai.Next()
+	bIdx, bOK := bi.Next()
+
+	// Bucket count. Initialize the absolute count and index into the
+	// positive/negative counts or deltas array. The bucket count is
+	// used to detect counter reset as well as unused buckets in a.
+	var (
+		aCount    float64
+		bCount    float64
+		aCountIdx int
+		bCountIdx int
+	)
+	if aOK {
+		aCount = aBuckets[aCountIdx].value
+	}
+	if bOK {
+		bCount = bBuckets[bCountIdx]
+	}
+
+loop:
+	for {
+		switch {
+		case aOK && bOK:
+			switch {
+			case aIdx == bIdx: // Both have an identical bucket index.
+				// Bucket count. Check bucket for reset from a to b.
+				if aCount > bCount {
+					return nil, nil, false
+				}
+
+				// Finish WIP insert for a and reset.
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+
+				// Finish WIP insert for b and reset.
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+
+				aIdx, aOK = ai.Next()
+				bIdx, bOK = bi.Next()
+				aInter.pos++ // Advance potential insert position.
+				aCountIdx++  // Advance absolute bucket count index for a.
+				if aOK {
+					aCount = aBuckets[aCountIdx].value
+				}
+				bInter.pos++ // Advance potential insert position.
+				bCountIdx++  // Advance absolute bucket count index for b.
+				if bOK {
+					bCount = bBuckets[bCountIdx]
+				}
+
+				continue
+			case aIdx < bIdx: // b misses a bucket index that is in a.
+				// This is ok if the count in a is 0, in which case we make a note to
+				// fill in the bucket in b and advance a.
+				if aCount == 0 {
+					bInter.num++ // Mark that we need to insert a bucket in b.
+					bInter.bucketIdx = aIdx
+					// Advance a
+					if aInter.num > 0 {
+						aInserts = append(aInserts, aInter)
+						aInter.num = 0
+					}
+					aIdx, aOK = ai.Next()
+					aInter.pos++
+					aCountIdx++
+					if aOK {
+						aCount = aBuckets[aCountIdx].value
+					}
+					continue
+				}
+				// Otherwise we are missing a bucket that was in use in a, which is a reset.
+				return nil, nil, false
+			case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
+				aInter.num++
+			aInter.bucketIdx = bIdx
+				// Advance b
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+				bIdx, bOK = bi.Next()
+				bInter.pos++
+				bCountIdx++
+				if bOK {
+					bCount = bBuckets[bCountIdx]
+				}
+			}
+		case aOK && !bOK: // b misses a value that is in a.
+			// This is ok if the count in a is 0, in which case we make a note to
+			// fill in the bucket in b and advance a.
+			if aCount == 0 {
+				bInter.num++
+				bInter.bucketIdx = aIdx
+				// Advance a
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+				aIdx, aOK = ai.Next()
+				aInter.pos++ // Advance potential insert position.
+				// Update absolute bucket counts for a.
+				aCountIdx++
+				if aOK {
+					aCount = aBuckets[aCountIdx].value
+				}
+				continue
+			}
+			// Otherwise we are missing a bucket that was in use in a, which is a reset.
+			return nil, nil, false
+		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
+			aInter.num++
+			aInter.bucketIdx = bIdx
+			// Advance b
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+				bInter.num = 0
+			}
+			bIdx, bOK = bi.Next()
+			bInter.pos++ // Advance potential insert position.
+			// Update absolute bucket counts for b.
+			bCountIdx++
+			if bOK {
+				bCount = bBuckets[bCountIdx]
+			}
+		default: // Both iterators ran out. We're done.
+			if aInter.num > 0 {
+				aInserts = append(aInserts, aInter)
+			}
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+			}
+			break loop
+		}
+	}
+
+	return aInserts, bInserts, true
+}
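+
+// For illustration, a hypothetical call where the new layout b extends the
+// old layout a by one trailing bucket (values chosen so that no counter reset
+// is detected):
+//
+//	a := []histogram.Span{{Offset: 0, Length: 2}}
+//	b := []histogram.Span{{Offset: 0, Length: 3}}
+//	aVals := []xorValue{{value: 1}, {value: 2}}
+//	bVals := []float64{1, 2, 3}
+//	fw, bw, ok := expandFloatSpansAndBuckets(a, b, aVals, bVals)
+//	// ok == true, bw is empty, and fw holds a single Insert asking for one
+//	// new bucket at position 2 (bucket index 2) of a.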
+
+// appendableGauge returns whether the chunk can be appended to, and if so
+// whether:
+//  1. Any recoding needs to happen to the chunk using the provided inserts
+//     (in case of any new buckets, positive or negative range, respectively).
+//  2. Any recoding needs to happen for the histogram being appended, using the
+//     backward inserts (in case of any missing buckets, positive or negative
+//     range, respectively).
+//
+// This method must be only used for gauge histograms.
+//
+// The chunk is not appendable in the following cases:
+//   - The schema has changed.
+//   - The custom bounds have changed if the current schema is custom buckets.
+//   - The threshold for the zero bucket has changed.
+//   - The last sample in the chunk was stale while the current sample is not stale.
+func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
+	positiveInserts, negativeInserts []Insert,
+	backwardPositiveInserts, backwardNegativeInserts []Insert,
+	positiveSpans, negativeSpans []histogram.Span,
+	okToAppend bool,
+) {
+	if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
+		return
+	}
+	if value.IsStaleNaN(h.Sum) {
+		// This is a stale sample whose buckets and spans don't matter.
+		okToAppend = true
+		return
+	}
+	if value.IsStaleNaN(a.sum.value) {
+		// If the last sample was stale, then we can only accept stale
+		// samples in this chunk.
+		return
+	}
+
+	if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
+		return
+	}
+
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		return
+	}
+
+	positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
+	negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
+	okToAppend = true
+	return
+}
+
+// appendFloatHistogram appends a float histogram to the chunk. The caller must
+// ensure that the histogram is properly structured, e.g. the number of buckets
+// used corresponds to the number conveyed by the span structures. First call
+// appendable() and act accordingly!
+func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.FloatHistogram) {
+	var tDelta int64
+	num := binary.BigEndian.Uint16(a.b.bytes())
+
+	if value.IsStaleNaN(h.Sum) {
+		// Emptying out other fields to write no buckets, and an empty
+		// layout in case of first histogram in the chunk.
+		h = &histogram.FloatHistogram{Sum: h.Sum}
+	}
+
+	if num == 0 {
+		// The first append gets the privilege to dictate the layout
+		// but it's also responsible for encoding it into the chunk!
+		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
+		a.schema = h.Schema
+		a.zThreshold = h.ZeroThreshold
+
+		if len(h.PositiveSpans) > 0 {
+			a.pSpans = make([]histogram.Span, len(h.PositiveSpans))
+			copy(a.pSpans, h.PositiveSpans)
+		} else {
+			a.pSpans = nil
+		}
+		if len(h.NegativeSpans) > 0 {
+			a.nSpans = make([]histogram.Span, len(h.NegativeSpans))
+			copy(a.nSpans, h.NegativeSpans)
+		} else {
+			a.nSpans = nil
+		}
+		if len(h.CustomValues) > 0 {
+			a.customValues = make([]float64, len(h.CustomValues))
+			copy(a.customValues, h.CustomValues)
+		} else {
+			a.customValues = nil
+		}
+
+		numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
+		if numPBuckets > 0 {
+			a.pBuckets = make([]xorValue, numPBuckets)
+			for i := 0; i < numPBuckets; i++ {
+				a.pBuckets[i] = xorValue{
+					value:   h.PositiveBuckets[i],
+					leading: 0xff,
+				}
+			}
+		} else {
+			a.pBuckets = nil
+		}
+		if numNBuckets > 0 {
+			a.nBuckets = make([]xorValue, numNBuckets)
+			for i := 0; i < numNBuckets; i++ {
+				a.nBuckets[i] = xorValue{
+					value:   h.NegativeBuckets[i],
+					leading: 0xff,
+				}
+			}
+		} else {
+			a.nBuckets = nil
+		}
+
+		// Now store the actual data.
+		putVarbitInt(a.b, t)
+		a.b.writeBits(math.Float64bits(h.Count), 64)
+		a.b.writeBits(math.Float64bits(h.ZeroCount), 64)
+		a.b.writeBits(math.Float64bits(h.Sum), 64)
+		a.cnt.value = h.Count
+		a.zCnt.value = h.ZeroCount
+		a.sum.value = h.Sum
+		for _, b := range h.PositiveBuckets {
+			a.b.writeBits(math.Float64bits(b), 64)
+		}
+		for _, b := range h.NegativeBuckets {
+			a.b.writeBits(math.Float64bits(b), 64)
+		}
+	} else {
+		// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
+		// so we don't need a separate single delta logic for the 2nd sample.
+		tDelta = t - a.t
+		tDod := tDelta - a.tDelta
+		putVarbitInt(a.b, tDod)
+
+		a.writeXorValue(&a.cnt, h.Count)
+		a.writeXorValue(&a.zCnt, h.ZeroCount)
+		a.writeXorValue(&a.sum, h.Sum)
+
+		for i, b := range h.PositiveBuckets {
+			a.writeXorValue(&a.pBuckets[i], b)
+		}
+		for i, b := range h.NegativeBuckets {
+			a.writeXorValue(&a.nBuckets[i], b)
+		}
+	}
+
+	binary.BigEndian.PutUint16(a.b.bytes(), num+1)
+
+	a.t = t
+	a.tDelta = tDelta
+}
+
+func (a *FloatHistogramAppender) writeXorValue(old *xorValue, v float64) {
+	xorWrite(a.b, v, old.value, &old.leading, &old.trailing)
+	old.value = v
+}
+
+// recode converts the current chunk to accommodate an expansion of the set of
+// (positive and/or negative) buckets used, according to the provided inserts,
+// so that the resulting chunk honors the provided new positive and negative
+// spans. To continue appending, use the returned Appender rather than the
+// receiver of this method.
+func (a *FloatHistogramAppender) recode(
+	positiveInserts, negativeInserts []Insert,
+	positiveSpans, negativeSpans []histogram.Span,
+) (Chunk, Appender) {
+	// TODO(beorn7): This currently just decodes everything and then encodes
+	// it again with the new span layout. This can probably be done in-place
+	// by editing the chunk. But let's first see how expensive it is in the
+	// big picture. Also, in-place editing might create concurrency issues.
+	byts := a.b.bytes()
+	it := newFloatHistogramIterator(byts)
+	hc := NewFloatHistogramChunk()
+	app, err := hc.Appender()
+	if err != nil {
+		panic(err) // This should never happen for an empty float histogram chunk.
+	}
+	happ := app.(*FloatHistogramAppender)
+	numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)
+
+	for it.Next() == ValFloatHistogram {
+		tOld, hOld := it.AtFloatHistogram(nil)
+
+		// We have to newly allocate slices for the modified buckets
+		// here because they are kept by the appender until the next
+		// append.
+		// TODO(beorn7): We might be able to optimize this.
+		var positiveBuckets, negativeBuckets []float64
+		if numPositiveBuckets > 0 {
+			positiveBuckets = make([]float64, numPositiveBuckets)
+		}
+		if numNegativeBuckets > 0 {
+			negativeBuckets = make([]float64, numNegativeBuckets)
+		}
+
+		// Save the modified histogram to the new chunk.
+		hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
+		if len(positiveInserts) > 0 {
+			hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, false)
+		}
+		if len(negativeInserts) > 0 {
+			hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, false)
+		}
+		happ.appendFloatHistogram(tOld, hOld)
+	}
+
+	happ.setCounterResetHeader(CounterResetHeader(byts[2] & CounterResetHeaderMask))
+	return hc, app
+}
+
+// recodeHistogram converts the current histogram (in-place) to accommodate an expansion of the set of
+// (positive and/or negative) buckets used.
+func (a *FloatHistogramAppender) recodeHistogram(
+	fh *histogram.FloatHistogram,
+	pBackwardInter, nBackwardInter []Insert,
+) {
+	if len(pBackwardInter) > 0 {
+		numPositiveBuckets := countSpans(fh.PositiveSpans)
+		fh.PositiveBuckets = insert(fh.PositiveBuckets, make([]float64, numPositiveBuckets), pBackwardInter, false)
+	}
+	if len(nBackwardInter) > 0 {
+		numNegativeBuckets := countSpans(fh.NegativeSpans)
+		fh.NegativeBuckets = insert(fh.NegativeBuckets, make([]float64, numNegativeBuckets), nBackwardInter, false)
+	}
+}
+
+func (a *FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
+	panic("appended a histogram sample to a float histogram chunk")
+}
+
+func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (Chunk, bool, Appender, error) {
+	if a.NumSamples() == 0 {
+		a.appendFloatHistogram(t, h)
+		if h.CounterResetHint == histogram.GaugeType {
+			a.setCounterResetHeader(GaugeType)
+			return nil, false, a, nil
+		}
+
+		switch {
+		case h.CounterResetHint == histogram.CounterReset:
+			// Always honor the explicit counter reset hint.
+			a.setCounterResetHeader(CounterReset)
+		case prev != nil:
+			// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
+			_, _, _, _, _, counterReset := prev.appendable(h)
+			if counterReset {
+				a.setCounterResetHeader(CounterReset)
+			} else {
+				a.setCounterResetHeader(NotCounterReset)
+			}
+		}
+		return nil, false, a, nil
+	}
+
+	// Adding counter-like histogram.
+	if h.CounterResetHint != histogram.GaugeType {
+		pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
+		if !okToAppend || counterReset {
+			if appendOnly {
+				if counterReset {
+					return nil, false, a, errors.New("float histogram counter reset")
+				}
+				return nil, false, a, errors.New("float histogram schema change")
+			}
+			newChunk := NewFloatHistogramChunk()
+			app, err := newChunk.Appender()
+			if err != nil {
+				panic(err) // This should never happen for an empty float histogram chunk.
+			}
+			happ := app.(*FloatHistogramAppender)
+			if counterReset {
+				happ.setCounterResetHeader(CounterReset)
+			}
+			happ.appendFloatHistogram(t, h)
+			return newChunk, false, app, nil
+		}
+		if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
+			// The histogram needs to be expanded to have the extra empty buckets
+			// of the chunk.
+			if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
+				// No new buckets from the histogram, so the spans of the appender can accommodate them.
+				// However, we need to make a copy in case the input is sharing spans from an iterator.
+				h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
+				copy(h.PositiveSpans, a.pSpans)
+				h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
+				copy(h.NegativeSpans, a.nSpans)
+			} else {
+				// Spans need pre-adjusting to accommodate the new buckets.
+				h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
+				h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
+			}
+			a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
+		}
+		if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
+			if appendOnly {
+				return nil, false, a, fmt.Errorf("float histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
+			}
+			chk, app := a.recode(
+				pForwardInserts, nForwardInserts,
+				h.PositiveSpans, h.NegativeSpans,
+			)
+			app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
+			return chk, true, app, nil
+		}
+		a.appendFloatHistogram(t, h)
+		return nil, false, a, nil
+	}
+	// Adding gauge histogram.
+	pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
+	if !okToAppend {
+		if appendOnly {
+			return nil, false, a, errors.New("float gauge histogram schema change")
+		}
+		newChunk := NewFloatHistogramChunk()
+		app, err := newChunk.Appender()
+		if err != nil {
+			panic(err) // This should never happen for an empty float histogram chunk.
+		}
+		happ := app.(*FloatHistogramAppender)
+		happ.setCounterResetHeader(GaugeType)
+		happ.appendFloatHistogram(t, h)
+		return newChunk, false, app, nil
+	}
+
+	if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
+		if appendOnly {
+			return nil, false, a, fmt.Errorf("float gauge histogram layout change with %d positive and %d negative backwards inserts", len(pBackwardInserts), len(nBackwardInserts))
+		}
+		h.PositiveSpans = pMergedSpans
+		h.NegativeSpans = nMergedSpans
+		a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
+	}
+
+	if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
+		if appendOnly {
+			return nil, false, a, fmt.Errorf("float gauge histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
+		}
+		chk, app := a.recode(
+			pForwardInserts, nForwardInserts,
+			h.PositiveSpans, h.NegativeSpans,
+		)
+		app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
+		return chk, true, app, nil
+	}
+
+	a.appendFloatHistogram(t, h)
+	return nil, false, a, nil
+}
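+
+// The four return values above encode what happened. A sketch of how a caller
+// might branch on them (happ, prev, ts and fh are hypothetical):
+//
+//	newChk, recoded, app, err := happ.AppendFloatHistogram(prev, ts, fh, false)
+//	switch {
+//	case err != nil:
+//		// Only with appendOnly=true: counter reset or layout change.
+//	case newChk != nil && recoded:
+//		// The chunk was recoded to fit fh; replace it and keep using app.
+//	case newChk != nil:
+//		// A fresh chunk was cut (e.g. counter reset); continue on it with app.
+//	default:
+//		// fh was appended in place; keep using app.
+//	}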
+
+type floatHistogramIterator struct {
+	br       bstreamReader
+	numTotal uint16
+	numRead  uint16
+
+	counterResetHeader CounterResetHeader
+
+	// Layout:
+	schema         int32
+	zThreshold     float64
+	pSpans, nSpans []histogram.Span
+	customValues   []float64
+
+	// For the fields that are tracked as deltas and ultimately dod's.
+	t      int64
+	tDelta int64
+
+	// All Gorilla xor encoded.
+	sum, cnt, zCnt xorValue
+
+	// Buckets are not of type xorValue to avoid creating
+	// new slices for every AtFloatHistogram call.
+	pBuckets, nBuckets                 []float64
+	pBucketsLeading, nBucketsLeading   []uint8
+	pBucketsTrailing, nBucketsTrailing []uint8
+
+	err error
+
+	// Track calls to retrieve methods. Once they have been called, we
+	// cannot recycle the bucket slices anymore because we have returned
+	// them in the histogram.
+	atFloatHistogramCalled bool
+}
+
+func (it *floatHistogramIterator) Seek(t int64) ValueType {
+	if it.err != nil {
+		return ValNone
+	}
+
+	for t > it.t || it.numRead == 0 {
+		if it.Next() == ValNone {
+			return ValNone
+		}
+	}
+	return ValFloatHistogram
+}
+
+func (it *floatHistogramIterator) At() (int64, float64) {
+	panic("cannot call floatHistogramIterator.At")
+}
+
+func (it *floatHistogramIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+	panic("cannot call floatHistogramIterator.AtHistogram")
+}
+
+func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	if value.IsStaleNaN(it.sum.value) {
+		return it.t, &histogram.FloatHistogram{Sum: it.sum.value}
+	}
+	if fh == nil {
+		it.atFloatHistogramCalled = true
+		return it.t, &histogram.FloatHistogram{
+			CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
+			Count:            it.cnt.value,
+			ZeroCount:        it.zCnt.value,
+			Sum:              it.sum.value,
+			ZeroThreshold:    it.zThreshold,
+			Schema:           it.schema,
+			PositiveSpans:    it.pSpans,
+			NegativeSpans:    it.nSpans,
+			PositiveBuckets:  it.pBuckets,
+			NegativeBuckets:  it.nBuckets,
+			CustomValues:     it.customValues,
+		}
+	}
+
+	fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
+	fh.Schema = it.schema
+	fh.ZeroThreshold = it.zThreshold
+	fh.ZeroCount = it.zCnt.value
+	fh.Count = it.cnt.value
+	fh.Sum = it.sum.value
+
+	fh.PositiveSpans = resize(fh.PositiveSpans, len(it.pSpans))
+	copy(fh.PositiveSpans, it.pSpans)
+
+	fh.NegativeSpans = resize(fh.NegativeSpans, len(it.nSpans))
+	copy(fh.NegativeSpans, it.nSpans)
+
+	fh.PositiveBuckets = resize(fh.PositiveBuckets, len(it.pBuckets))
+	copy(fh.PositiveBuckets, it.pBuckets)
+
+	fh.NegativeBuckets = resize(fh.NegativeBuckets, len(it.nBuckets))
+	copy(fh.NegativeBuckets, it.nBuckets)
+
+	fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
+	copy(fh.CustomValues, it.customValues)
+
+	return it.t, fh
+}
+
+func (it *floatHistogramIterator) AtT() int64 {
+	return it.t
+}
+
+func (it *floatHistogramIterator) Err() error {
+	return it.err
+}
+
+func (it *floatHistogramIterator) Reset(b []byte) {
+	// The first 3 bytes contain chunk headers.
+	// We skip that for actual samples.
+	it.br = newBReader(b[3:])
+	it.numTotal = binary.BigEndian.Uint16(b)
+	it.numRead = 0
+
+	it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+
+	it.t, it.tDelta = 0, 0
+	it.cnt, it.zCnt, it.sum = xorValue{}, xorValue{}, xorValue{}
+
+	if it.atFloatHistogramCalled {
+		it.atFloatHistogramCalled = false
+		it.pBuckets, it.nBuckets = nil, nil
+		it.pSpans, it.nSpans = nil, nil
+		it.customValues = nil
+	} else {
+		it.pBuckets, it.nBuckets = it.pBuckets[:0], it.nBuckets[:0]
+	}
+	it.pBucketsLeading, it.pBucketsTrailing = it.pBucketsLeading[:0], it.pBucketsTrailing[:0]
+	it.nBucketsLeading, it.nBucketsTrailing = it.nBucketsLeading[:0], it.nBucketsTrailing[:0]
+
+	it.err = nil
+}
+
+func (it *floatHistogramIterator) Next() ValueType {
+	if it.err != nil || it.numRead == it.numTotal {
+		return ValNone
+	}
+
+	if it.numRead == 0 {
+		// The first read is responsible for reading the chunk layout
+		// and for initializing fields that depend on it. We give
+		// counter reset info at chunk level, hence we discard it here.
+		schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.schema = schema
+		it.zThreshold = zeroThreshold
+		it.pSpans, it.nSpans = posSpans, negSpans
+		it.customValues = customValues
+		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
+		// Allocate bucket slices as needed, recycling existing slices
+		// in case this iterator was reset and already has slices of a
+		// sufficient capacity.
+		if numPBuckets > 0 {
+			it.pBuckets = append(it.pBuckets, make([]float64, numPBuckets)...)
+			it.pBucketsLeading = append(it.pBucketsLeading, make([]uint8, numPBuckets)...)
+			it.pBucketsTrailing = append(it.pBucketsTrailing, make([]uint8, numPBuckets)...)
+		}
+		if numNBuckets > 0 {
+			it.nBuckets = append(it.nBuckets, make([]float64, numNBuckets)...)
+			it.nBucketsLeading = append(it.nBucketsLeading, make([]uint8, numNBuckets)...)
+			it.nBucketsTrailing = append(it.nBucketsTrailing, make([]uint8, numNBuckets)...)
+		}
+
+		// Now read the actual data.
+		t, err := readVarbitInt(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.t = t
+
+		cnt, err := it.br.readBits(64)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.cnt.value = math.Float64frombits(cnt)
+
+		zcnt, err := it.br.readBits(64)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.zCnt.value = math.Float64frombits(zcnt)
+
+		sum, err := it.br.readBits(64)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.sum.value = math.Float64frombits(sum)
+
+		for i := range it.pBuckets {
+			v, err := it.br.readBits(64)
+			if err != nil {
+				it.err = err
+				return ValNone
+			}
+			it.pBuckets[i] = math.Float64frombits(v)
+		}
+		for i := range it.nBuckets {
+			v, err := it.br.readBits(64)
+			if err != nil {
+				it.err = err
+				return ValNone
+			}
+			it.nBuckets[i] = math.Float64frombits(v)
+		}
+
+		it.numRead++
+		return ValFloatHistogram
+	}
+
+	// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
+	// so we don't need a separate single delta logic for the 2nd sample.
+
+	// Recycle bucket, span and custom value slices that have not been returned yet. Otherwise, copy them.
+	// We can always recycle the slices for leading and trailing bits as they are
+	// never returned to the caller.
+	if it.atFloatHistogramCalled {
+		it.atFloatHistogramCalled = false
+		if len(it.pBuckets) > 0 {
+			newBuckets := make([]float64, len(it.pBuckets))
+			copy(newBuckets, it.pBuckets)
+			it.pBuckets = newBuckets
+		} else {
+			it.pBuckets = nil
+		}
+		if len(it.nBuckets) > 0 {
+			newBuckets := make([]float64, len(it.nBuckets))
+			copy(newBuckets, it.nBuckets)
+			it.nBuckets = newBuckets
+		} else {
+			it.nBuckets = nil
+		}
+		if len(it.pSpans) > 0 {
+			newSpans := make([]histogram.Span, len(it.pSpans))
+			copy(newSpans, it.pSpans)
+			it.pSpans = newSpans
+		} else {
+			it.pSpans = nil
+		}
+		if len(it.nSpans) > 0 {
+			newSpans := make([]histogram.Span, len(it.nSpans))
+			copy(newSpans, it.nSpans)
+			it.nSpans = newSpans
+		} else {
+			it.nSpans = nil
+		}
+		if len(it.customValues) > 0 {
+			newCustomValues := make([]float64, len(it.customValues))
+			copy(newCustomValues, it.customValues)
+			it.customValues = newCustomValues
+		} else {
+			it.customValues = nil
+		}
+	}
+
+	tDod, err := readVarbitInt(&it.br)
+	if err != nil {
+		it.err = err
+		return ValNone
+	}
+	it.tDelta += tDod
+	it.t += it.tDelta
+
+	if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok {
+		return ValNone
+	}
+
+	if ok := it.readXor(&it.zCnt.value, &it.zCnt.leading, &it.zCnt.trailing); !ok {
+		return ValNone
+	}
+
+	if ok := it.readXor(&it.sum.value, &it.sum.leading, &it.sum.trailing); !ok {
+		return ValNone
+	}
+
+	if value.IsStaleNaN(it.sum.value) {
+		it.numRead++
+		return ValFloatHistogram
+	}
+
+	for i := range it.pBuckets {
+		if ok := it.readXor(&it.pBuckets[i], &it.pBucketsLeading[i], &it.pBucketsTrailing[i]); !ok {
+			return ValNone
+		}
+	}
+
+	for i := range it.nBuckets {
+		if ok := it.readXor(&it.nBuckets[i], &it.nBucketsLeading[i], &it.nBucketsTrailing[i]); !ok {
+			return ValNone
+		}
+	}
+
+	it.numRead++
+	return ValFloatHistogram
+}
+
+func (it *floatHistogramIterator) readXor(v *float64, leading, trailing *uint8) bool {
+	err := xorRead(&it.br, v, leading, trailing)
+	if err != nil {
+		it.err = err
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2eec6b75ae74f4622415a6f152729e5005369ac
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
@@ -0,0 +1,1328 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunkenc
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/value"
+)
+
+// HistogramChunk holds encoded sample data for a sparse, high-resolution
+// histogram.
+//
+// Each sample has multiple "fields", stored in the following way (raw = store
+// number directly, delta = store delta to the previous number, dod = store
+// delta of the delta to the previous number, xor = what we do for regular
+// sample values):
+//
+//	field →    ts    count zeroCount sum []posbuckets []negbuckets
+//	sample 1   raw   raw   raw       raw []raw        []raw
+//	sample 2   delta delta delta     xor []delta      []delta
+//	sample >2  dod   dod   dod       xor []dod        []dod
+type HistogramChunk struct {
+	b bstream
+}
+
+// NewHistogramChunk returns a new chunk with histogram encoding.
+func NewHistogramChunk() *HistogramChunk {
+	b := make([]byte, 3, 128)
+	return &HistogramChunk{b: bstream{stream: b, count: 0}}
+}
+
+func (c *HistogramChunk) Reset(stream []byte) {
+	c.b.Reset(stream)
+}
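+
+// The integer histogram chunk is used like its float counterpart. A sketch,
+// assuming a *histogram.Histogram h and timestamp ts, and relying on the
+// appender's AppendHistogram method (the integer analogue of
+// AppendFloatHistogram above):
+//
+//	chk := NewHistogramChunk()
+//	app, _ := chk.Appender()
+//	happ := app.(*HistogramAppender)
+//	newChk, recoded, app, err := happ.AppendHistogram(nil, ts, h, false)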
+
+// Encoding returns the encoding type.
+func (c *HistogramChunk) Encoding() Encoding {
+	return EncHistogram
+}
+
+// Bytes returns the underlying byte slice of the chunk.
+func (c *HistogramChunk) Bytes() []byte {
+	return c.b.bytes()
+}
+
+// NumSamples returns the number of samples in the chunk.
+func (c *HistogramChunk) NumSamples() int {
+	return int(binary.BigEndian.Uint16(c.Bytes()))
+}
+
+// Layout returns the histogram layout. Only call this on chunks that have at
+// least one sample.
+func (c *HistogramChunk) Layout() (
+	schema int32, zeroThreshold float64,
+	negativeSpans, positiveSpans []histogram.Span,
+	customValues []float64,
+	err error,
+) {
+	if c.NumSamples() == 0 {
+		panic("HistogramChunk.Layout() called on an empty chunk")
+	}
+	b := newBReader(c.Bytes()[2:])
+	return readHistogramChunkLayout(&b)
+}
+
+// CounterResetHeader defines the first 2 bits of the chunk header.
+type CounterResetHeader byte
+
+const (
+	// CounterReset means there was definitely a counter reset that resulted in this chunk.
+	CounterReset CounterResetHeader = 0b10000000
+	// NotCounterReset means there was definitely no counter reset when cutting this chunk.
+	NotCounterReset CounterResetHeader = 0b01000000
+	// GaugeType means this chunk contains a gauge histogram, where counter resets do not happen.
+	GaugeType CounterResetHeader = 0b11000000
+	// UnknownCounterReset means we cannot say if this chunk was created due to a counter reset or not.
+	// An explicit counter reset detection needs to happen during query time.
+	UnknownCounterReset CounterResetHeader = 0b00000000
+)
+
+// CounterResetHeaderMask is the mask to get the counter reset header bits.
+const CounterResetHeaderMask byte = 0b11000000
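+
+// For example, the header bits of a chunk byte are extracted and compared
+// like this (a sketch with an arbitrary header byte):
+//
+//	hdr := byte(0b11010101) // third byte of a chunk
+//	_ = CounterResetHeader(hdr&CounterResetHeaderMask) == GaugeType // true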
+
+// GetCounterResetHeader returns the info about the first 2 bits of the chunk
+// header.
+func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader {
+	return CounterResetHeader(c.Bytes()[2] & CounterResetHeaderMask)
+}
+
+// Compact implements the Chunk interface.
+func (c *HistogramChunk) Compact() {
+	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
+		buf := make([]byte, l)
+		copy(buf, c.b.stream)
+		c.b.stream = buf
+	}
+}
+
+// Appender implements the Chunk interface.
+func (c *HistogramChunk) Appender() (Appender, error) {
+	it := c.iterator(nil)
+
+	// To get an appender, we must know the state it would have if we had
+	// appended all existing data from scratch. We iterate through to the end
+	// and populate via the iterator's state.
+	for it.Next() == ValHistogram {
+	}
+	if err := it.Err(); err != nil {
+		return nil, err
+	}
+
+	a := &HistogramAppender{
+		b: &c.b,
+
+		schema:        it.schema,
+		zThreshold:    it.zThreshold,
+		pSpans:        it.pSpans,
+		nSpans:        it.nSpans,
+		customValues:  it.customValues,
+		t:             it.t,
+		cnt:           it.cnt,
+		zCnt:          it.zCnt,
+		tDelta:        it.tDelta,
+		cntDelta:      it.cntDelta,
+		zCntDelta:     it.zCntDelta,
+		pBuckets:      it.pBuckets,
+		nBuckets:      it.nBuckets,
+		pBucketsDelta: it.pBucketsDelta,
+		nBucketsDelta: it.nBucketsDelta,
+
+		sum:      it.sum,
+		leading:  it.leading,
+		trailing: it.trailing,
+	}
+	if it.numTotal == 0 {
+		a.leading = 0xff
+	}
+	return a, nil
+}
+
+func countSpans(spans []histogram.Span) int {
+	var cnt int
+	for _, s := range spans {
+		cnt += int(s.Length)
+	}
+	return cnt
+}
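+
+// For example (a sketch):
+//
+//	spans := []histogram.Span{{Offset: 0, Length: 2}, {Offset: 3, Length: 1}}
+//	countSpans(spans) // 3: lengths 2+1; offsets don't affect the count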
+
+func newHistogramIterator(b []byte) *histogramIterator {
+	it := &histogramIterator{
+		br:       newBReader(b),
+		numTotal: binary.BigEndian.Uint16(b),
+		t:        math.MinInt64,
+	}
+	// The first 3 bytes contain chunk headers.
+	// We skip that for actual samples.
+	_, _ = it.br.readBits(24)
+	it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+	return it
+}
+
+func (c *HistogramChunk) iterator(it Iterator) *histogramIterator {
+	// This comment is copied from XORChunk.iterator:
+	//   Should iterators guarantee to act on a copy of the data so it doesn't lock append?
+	//   When using striped locks to guard access to chunks, probably yes.
+	//   Could only copy data if the chunk is not completed yet.
+	if histogramIter, ok := it.(*histogramIterator); ok {
+		histogramIter.Reset(c.b.bytes())
+		return histogramIter
+	}
+	return newHistogramIterator(c.b.bytes())
+}
+
+// Iterator implements the Chunk interface.
+func (c *HistogramChunk) Iterator(it Iterator) Iterator {
+	return c.iterator(it)
+}
+
+// HistogramAppender is an Appender implementation for sparse histograms.
+type HistogramAppender struct {
+	b *bstream
+
+	// Layout:
+	schema         int32
+	zThreshold     float64
+	pSpans, nSpans []histogram.Span
+	customValues   []float64
+
+	// Although we intend to start new chunks on counter resets, we still
+	// have to handle negative deltas for gauge histograms. Therefore, even
+	// deltas are signed types here (even for tDelta to not treat that one
+	// specially).
+	t                            int64
+	cnt, zCnt                    uint64
+	tDelta, cntDelta, zCntDelta  int64
+	pBuckets, nBuckets           []int64
+	pBucketsDelta, nBucketsDelta []int64
+
+	// The sum is Gorilla xor encoded.
+	sum      float64
+	leading  uint8
+	trailing uint8
+}
+
+func (a *HistogramAppender) GetCounterResetHeader() CounterResetHeader {
+	return CounterResetHeader(a.b.bytes()[2] & CounterResetHeaderMask)
+}
+
+func (a *HistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
+	a.b.bytes()[2] = (a.b.bytes()[2] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
+}
+
+func (a *HistogramAppender) NumSamples() int {
+	return int(binary.BigEndian.Uint16(a.b.bytes()))
+}
+
+// Append implements Appender. This implementation panics because normal float
+// samples must never be appended to a histogram chunk.
+func (a *HistogramAppender) Append(int64, float64) {
+	panic("appended a float sample to a histogram chunk")
+}
+
+// appendable returns whether the chunk can be appended to, and if so whether
+//  1. Any recoding needs to happen to the chunk using the provided forward
+//     inserts (in case of any new buckets, positive or negative range,
+//     respectively).
+//  2. Any recoding needs to happen for the histogram being appended, using the
+//     backward inserts (in case of any missing buckets, positive or negative
+//     range, respectively).
+//
+// If the sample is a gauge histogram, AppendableGauge must be used instead.
+//
+// The chunk is not appendable in the following cases:
+//
+//   - The schema has changed.
+//   - The custom bounds have changed if the current schema is custom buckets.
+//   - The threshold for the zero bucket has changed.
+//   - Any buckets have disappeared, unless the bucket count was 0 (i.e. the
+//     bucket was unused). Empty buckets can happen if the chunk was recoded
+//     and we're merging a non-recoded histogram. In this case backward inserts
+//     will be provided.
+//   - There was a counter reset in the count of observations or in any bucket,
+//     including the zero bucket.
+//   - The last sample in the chunk was stale while the current sample is not stale.
+//
+// The method returns an additional boolean set to true if it is not appendable
+// because of a counter reset. If the given sample is stale, it is always ok to
+// append. If counterReset is true, okToAppend is always false.
+func (a *HistogramAppender) appendable(h *histogram.Histogram) (
+	positiveInserts, negativeInserts []Insert,
+	backwardPositiveInserts, backwardNegativeInserts []Insert,
+	okToAppend, counterReset bool,
+) {
+	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
+		return
+	}
+	if h.CounterResetHint == histogram.CounterReset {
+		// Always honor the explicit counter reset hint.
+		counterReset = true
+		return
+	}
+	if value.IsStaleNaN(h.Sum) {
+		// This is a stale sample whose buckets and spans don't matter.
+		okToAppend = true
+		return
+	}
+	if value.IsStaleNaN(a.sum) {
+		// If the last sample was stale, then we can only accept stale
+		// samples in this chunk.
+		return
+	}
+
+	if h.Count < a.cnt {
+		// There has been a counter reset.
+		counterReset = true
+		return
+	}
+
+	if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
+		return
+	}
+
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		counterReset = true
+		return
+	}
+
+	if h.ZeroCount < a.zCnt {
+		// There has been a counter reset since ZeroThreshold didn't change.
+		counterReset = true
+		return
+	}
+
+	var ok bool
+	positiveInserts, backwardPositiveInserts, ok = expandIntSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
+	if !ok {
+		counterReset = true
+		return
+	}
+	negativeInserts, backwardNegativeInserts, ok = expandIntSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
+	if !ok {
+		counterReset = true
+		return
+	}
+
+	okToAppend = true
+	return
+}
+
+// expandIntSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
+// they match the spans in 'b'. 'b' must cover the same or more buckets than
+// 'a', otherwise the function will return false.
+// The function also returns the inserts to expand 'b' to also cover all the
+// buckets that are missing in 'b', but are present with 0 counter value in 'a'.
+// The function also checks for counter resets between 'a' and 'b'.
+//
+// Example:
+//
+// Let's say the old buckets look like this:
+//
+//	span syntax: [offset, length]
+//	spans      : [ 0 , 2 ]               [2,1]                   [ 3 , 2 ]                     [3,1]       [1,1]
+//	bucket idx : [0]   [1]    2     3    [4]    5     6     7    [8]   [9]    10    11    12   [13]   14   [15]
+//	raw values    6     3                 3                       2     4                       5           1
+//	deltas        6    -3                 0                      -1     2                       1          -4
+//
+// But now we introduce a new bucket layout. (Carefully chosen example where we
+// have a span appended, one unchanged[*], one prepended, and two merged - in
+// that order.)
+//
+// [*] unchanged in terms of which bucket indices they represent, but to
+// achieve that, their offset needs to change if "disrupted" by spans changing
+// ahead of them.
+//
+//	                                      \/ this one is "unchanged"
+//	spans      : [  0  ,  3    ]         [1,1]       [    1    ,   4     ]                     [  3  ,   3    ]
+//	bucket idx : [0]   [1]   [2]    3    [4]    5    [6]   [7]   [8]   [9]    10    11    12   [13]  [14]  [15]
+//	raw values    6     3     0           3           0     0     2     4                       5     0     1
+//	deltas        6    -3    -3           3          -3     0     2     2                       1    -5     1
+//	delta mods:                          / \                     / \                                       / \
+//
+// Note for histograms with delta-encoded buckets: Whenever any new buckets are
+// introduced, the subsequent "old" bucket needs to readjust its delta to the
+// new base of 0. Thus, for the caller who wants to transform the set of
+// original deltas to a new set of deltas to match a new span layout that adds
+// buckets, we simply need to generate a list of inserts.
+//
+// Note: Within expandIntSpansAndBuckets we don't have to worry about the
+// changes to the spans themselves, thanks to the iterators: we get to work
+// with the more useful bucket indices (which of course directly correspond to
+// the buckets we have to adjust).
+func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) (forward, backward []Insert, ok bool) {
+	ai := newBucketIterator(a)
+	bi := newBucketIterator(b)
+
+	var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
+	var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
+
+	// When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should
+	// be yielded when we finish a streak of new buckets.
+	var aInter Insert
+	var bInter Insert
+
+	aIdx, aOK := ai.Next()
+	bIdx, bOK := bi.Next()
+
+	// Bucket count. Initialize the absolute count and index into the
+	// positive/negative counts or deltas array. The bucket count is
+	// used to detect counter reset as well as unused buckets in a.
+	var (
+		aCount    int64
+		bCount    int64
+		aCountIdx int
+		bCountIdx int
+	)
+	if aOK {
+		aCount = aBuckets[aCountIdx]
+	}
+	if bOK {
+		bCount = bBuckets[bCountIdx]
+	}
+
+loop:
+	for {
+		switch {
+		case aOK && bOK:
+			switch {
+			case aIdx == bIdx: // Both have an identical bucket index.
+				// Bucket count. Check bucket for reset from a to b.
+				if aCount > bCount {
+					return nil, nil, false
+				}
+
+				// Finish WIP insert for a and reset.
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+
+				// Finish WIP insert for b and reset.
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+
+				aIdx, aOK = ai.Next()
+				bIdx, bOK = bi.Next()
+				aInter.pos++ // Advance potential insert position.
+				aCountIdx++  // Advance absolute bucket count index for a.
+				if aOK {
+					aCount += aBuckets[aCountIdx]
+				}
+				bInter.pos++ // Advance potential insert position.
+				bCountIdx++  // Advance absolute bucket count index for b.
+				if bOK {
+					bCount += bBuckets[bCountIdx]
+				}
+
+				continue
+			case aIdx < bIdx: // b misses a bucket index that is in a.
+				// This is ok if the count in a is 0, in which case we make a note to
+				// fill in the bucket in b and advance a.
+				if aCount == 0 {
+					bInter.num++ // Mark that we need to insert a bucket in b.
+					bInter.bucketIdx = aIdx
+					// Advance a
+					if aInter.num > 0 {
+						aInserts = append(aInserts, aInter)
+						aInter.num = 0
+					}
+					aIdx, aOK = ai.Next()
+					aInter.pos++
+					aCountIdx++
+					if aOK {
+						aCount += aBuckets[aCountIdx]
+					}
+					continue
+				}
+				// Otherwise we are missing a bucket that was in use in a, which is a reset.
+				return nil, nil, false
+			case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
+				aInter.num++
+				aInter.bucketIdx = bIdx
+				// Advance b
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+				bIdx, bOK = bi.Next()
+				bInter.pos++
+				bCountIdx++
+				if bOK {
+					bCount += bBuckets[bCountIdx]
+				}
+			}
+		case aOK && !bOK: // b misses a value that is in a.
+			// This is ok if the count in a is 0, in which case we make a note to
+			// fill in the bucket in b and advance a.
+			if aCount == 0 {
+				bInter.num++
+				bInter.bucketIdx = aIdx
+				// Advance a
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+				aIdx, aOK = ai.Next()
+				aInter.pos++ // Advance potential insert position.
+				// Update absolute bucket counts for a.
+				aCountIdx++
+				if aOK {
+					aCount += aBuckets[aCountIdx]
+				}
+				continue
+			}
+			// Otherwise we are missing a bucket that was in use in a, which is a reset.
+			return nil, nil, false
+		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
+			aInter.num++
+			aInter.bucketIdx = bIdx
+			// Advance b
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+				bInter.num = 0
+			}
+			bIdx, bOK = bi.Next()
+			bInter.pos++ // Advance potential insert position.
+			// Update absolute bucket counts for b.
+			bCountIdx++
+			if bOK {
+				bCount += bBuckets[bCountIdx]
+			}
+		default: // Both iterators ran out. We're done.
+			if aInter.num > 0 {
+				aInserts = append(aInserts, aInter)
+			}
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+			}
+			break loop
+		}
+	}
+
+	return aInserts, bInserts, true
+}
+
+// appendableGauge returns whether the chunk can be appended to, and if so
+// whether:
+//  1. Any recoding needs to happen to the chunk using the provided forward
+//     inserts (in case of any new buckets, positive or negative range,
+//     respectively).
+//  2. Any recoding needs to happen for the histogram being appended, using the
+//     backward inserts (in case of any missing buckets, positive or negative
+//     range, respectively).
+//
+// This method must be only used for gauge histograms.
+//
+// The chunk is not appendable in the following cases:
+//   - The schema has changed.
+//   - The custom bounds have changed if the current schema is custom buckets.
+//   - The threshold for the zero bucket has changed.
+//   - The last sample in the chunk was stale while the current sample is not stale.
+func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
+	positiveInserts, negativeInserts []Insert,
+	backwardPositiveInserts, backwardNegativeInserts []Insert,
+	positiveSpans, negativeSpans []histogram.Span,
+	okToAppend bool,
+) {
+	if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
+		return
+	}
+	if value.IsStaleNaN(h.Sum) {
+		// This is a stale sample whose buckets and spans don't matter.
+		okToAppend = true
+		return
+	}
+	if value.IsStaleNaN(a.sum) {
+		// If the last sample was stale, then we can only accept stale
+		// samples in this chunk.
+		return
+	}
+
+	if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
+		return
+	}
+
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		return
+	}
+
+	positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
+	negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
+	okToAppend = true
+	return
+}
+
+// appendHistogram appends a histogram to the chunk. The caller must ensure that
+// the histogram is properly structured, e.g. the number of buckets used
+// corresponds to the number conveyed by the span structures. First call
+// Appendable() and act accordingly!
+func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
+	var tDelta, cntDelta, zCntDelta int64
+	num := binary.BigEndian.Uint16(a.b.bytes())
+
+	if value.IsStaleNaN(h.Sum) {
+		// Emptying out other fields to write no buckets, and an empty
+		// layout in case of the first histogram in the chunk.
+		h = &histogram.Histogram{Sum: h.Sum}
+	}
+
+	if num == 0 {
+		// The first append gets the privilege to dictate the layout
+		// but it's also responsible for encoding it into the chunk!
+		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
+		a.schema = h.Schema
+		a.zThreshold = h.ZeroThreshold
+
+		if len(h.PositiveSpans) > 0 {
+			a.pSpans = make([]histogram.Span, len(h.PositiveSpans))
+			copy(a.pSpans, h.PositiveSpans)
+		} else {
+			a.pSpans = nil
+		}
+		if len(h.NegativeSpans) > 0 {
+			a.nSpans = make([]histogram.Span, len(h.NegativeSpans))
+			copy(a.nSpans, h.NegativeSpans)
+		} else {
+			a.nSpans = nil
+		}
+		if len(h.CustomValues) > 0 {
+			a.customValues = make([]float64, len(h.CustomValues))
+			copy(a.customValues, h.CustomValues)
+		} else {
+			a.customValues = nil
+		}
+
+		numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
+		if numPBuckets > 0 {
+			a.pBuckets = make([]int64, numPBuckets)
+			a.pBucketsDelta = make([]int64, numPBuckets)
+		} else {
+			a.pBuckets = nil
+			a.pBucketsDelta = nil
+		}
+		if numNBuckets > 0 {
+			a.nBuckets = make([]int64, numNBuckets)
+			a.nBucketsDelta = make([]int64, numNBuckets)
+		} else {
+			a.nBuckets = nil
+			a.nBucketsDelta = nil
+		}
+
+		// Now store the actual data.
+		putVarbitInt(a.b, t)
+		putVarbitUint(a.b, h.Count)
+		putVarbitUint(a.b, h.ZeroCount)
+		a.b.writeBits(math.Float64bits(h.Sum), 64)
+		for _, b := range h.PositiveBuckets {
+			putVarbitInt(a.b, b)
+		}
+		for _, b := range h.NegativeBuckets {
+			putVarbitInt(a.b, b)
+		}
+	} else {
+		// The case for the 2nd sample with single deltas is implicitly
+		// handled correctly with the double delta code, so we don't
+		// need a separate single delta logic for the 2nd sample.
+
+		tDelta = t - a.t
+		cntDelta = int64(h.Count) - int64(a.cnt)
+		zCntDelta = int64(h.ZeroCount) - int64(a.zCnt)
+
+		tDod := tDelta - a.tDelta
+		cntDod := cntDelta - a.cntDelta
+		zCntDod := zCntDelta - a.zCntDelta
+
+		if value.IsStaleNaN(h.Sum) {
+			cntDod, zCntDod = 0, 0
+		}
+
+		putVarbitInt(a.b, tDod)
+		putVarbitInt(a.b, cntDod)
+		putVarbitInt(a.b, zCntDod)
+
+		a.writeSumDelta(h.Sum)
+
+		for i, b := range h.PositiveBuckets {
+			delta := b - a.pBuckets[i]
+			dod := delta - a.pBucketsDelta[i]
+			putVarbitInt(a.b, dod)
+			a.pBucketsDelta[i] = delta
+		}
+		for i, b := range h.NegativeBuckets {
+			delta := b - a.nBuckets[i]
+			dod := delta - a.nBucketsDelta[i]
+			putVarbitInt(a.b, dod)
+			a.nBucketsDelta[i] = delta
+		}
+	}
+
+	binary.BigEndian.PutUint16(a.b.bytes(), num+1)
+
+	a.t = t
+	a.cnt = h.Count
+	a.zCnt = h.ZeroCount
+	a.tDelta = tDelta
+	a.cntDelta = cntDelta
+	a.zCntDelta = zCntDelta
+
+	copy(a.pBuckets, h.PositiveBuckets)
+	copy(a.nBuckets, h.NegativeBuckets)
+	// Note that the bucket deltas were already updated above.
+	a.sum = h.Sum
+}
+
+// recode converts the current chunk to accommodate an expansion of the set of
+// (positive and/or negative) buckets used, according to the provided inserts,
+// resulting in the honoring of the provided new positive and negative spans. To
+// continue appending, use the returned Appender rather than the receiver of
+// this method.
+func (a *HistogramAppender) recode(
+	positiveInserts, negativeInserts []Insert,
+	positiveSpans, negativeSpans []histogram.Span,
+) (Chunk, Appender) {
+	// TODO(beorn7): This currently just decodes everything and then encodes
+	// it again with the new span layout. This can probably be done in-place
+	// by editing the chunk. But let's first see how expensive it is in the
+	// big picture. Also, in-place editing might create concurrency issues.
+	byts := a.b.bytes()
+	it := newHistogramIterator(byts)
+	hc := NewHistogramChunk()
+	app, err := hc.Appender()
+	if err != nil {
+		panic(err) // This should never happen for an empty histogram chunk.
+	}
+	happ := app.(*HistogramAppender)
+	numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)
+
+	for it.Next() == ValHistogram {
+		tOld, hOld := it.AtHistogram(nil)
+
+		// We have to newly allocate slices for the modified buckets
+		// here because they are kept by the appender until the next
+		// append.
+		// TODO(beorn7): We might be able to optimize this.
+		var positiveBuckets, negativeBuckets []int64
+		if numPositiveBuckets > 0 {
+			positiveBuckets = make([]int64, numPositiveBuckets)
+		}
+		if numNegativeBuckets > 0 {
+			negativeBuckets = make([]int64, numNegativeBuckets)
+		}
+
+		// Save the modified histogram to the new chunk.
+		hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
+		if len(positiveInserts) > 0 {
+			hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, true)
+		}
+		if len(negativeInserts) > 0 {
+			hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, true)
+		}
+		happ.appendHistogram(tOld, hOld)
+	}
+
+	happ.setCounterResetHeader(CounterResetHeader(byts[2] & CounterResetHeaderMask))
+	return hc, app
+}
+
+// recodeHistogram converts the current histogram (in-place) to accommodate an
+// expansion of the set of (positive and/or negative) buckets used.
+func (a *HistogramAppender) recodeHistogram(
+	h *histogram.Histogram,
+	pBackwardInserts, nBackwardInserts []Insert,
+) {
+	if len(pBackwardInserts) > 0 {
+		numPositiveBuckets := countSpans(h.PositiveSpans)
+		h.PositiveBuckets = insert(h.PositiveBuckets, make([]int64, numPositiveBuckets), pBackwardInserts, true)
+	}
+	if len(nBackwardInserts) > 0 {
+		numNegativeBuckets := countSpans(h.NegativeSpans)
+		h.NegativeBuckets = insert(h.NegativeBuckets, make([]int64, numNegativeBuckets), nBackwardInserts, true)
+	}
+}
+
+func (a *HistogramAppender) writeSumDelta(v float64) {
+	xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
+}
+
+func (a *HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
+	panic("appended a float histogram sample to a histogram chunk")
+}
+
+func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (Chunk, bool, Appender, error) {
+	if a.NumSamples() == 0 {
+		a.appendHistogram(t, h)
+		if h.CounterResetHint == histogram.GaugeType {
+			a.setCounterResetHeader(GaugeType)
+			return nil, false, a, nil
+		}
+
+		switch {
+		case h.CounterResetHint == histogram.CounterReset:
+			// Always honor the explicit counter reset hint.
+			a.setCounterResetHeader(CounterReset)
+		case prev != nil:
+			// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
+			_, _, _, _, _, counterReset := prev.appendable(h)
+			if counterReset {
+				a.setCounterResetHeader(CounterReset)
+			} else {
+				a.setCounterResetHeader(NotCounterReset)
+			}
+		}
+		return nil, false, a, nil
+	}
+
+	// Adding counter-like histogram.
+	if h.CounterResetHint != histogram.GaugeType {
+		pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
+		if !okToAppend || counterReset {
+			if appendOnly {
+				if counterReset {
+					return nil, false, a, errors.New("histogram counter reset")
+				}
+				return nil, false, a, errors.New("histogram schema change")
+			}
+			newChunk := NewHistogramChunk()
+			app, err := newChunk.Appender()
+			if err != nil {
+				panic(err) // This should never happen for an empty histogram chunk.
+			}
+			happ := app.(*HistogramAppender)
+			if counterReset {
+				happ.setCounterResetHeader(CounterReset)
+			}
+			happ.appendHistogram(t, h)
+			return newChunk, false, app, nil
+		}
+		if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
+			// The histogram needs to be expanded to have the extra empty buckets
+			// of the chunk.
+			if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
+				// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
+				// However, we need to make a copy in case the input is sharing spans from an iterator.
+				h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
+				copy(h.PositiveSpans, a.pSpans)
+				h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
+				copy(h.NegativeSpans, a.nSpans)
+			} else {
+				// Spans need pre-adjusting to accommodate the new buckets.
+				h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
+				h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
+			}
+			a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
+		}
+		if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
+			if appendOnly {
+				return nil, false, a, fmt.Errorf("histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
+			}
+			chk, app := a.recode(
+				pForwardInserts, nForwardInserts,
+				h.PositiveSpans, h.NegativeSpans,
+			)
+			app.(*HistogramAppender).appendHistogram(t, h)
+			return chk, true, app, nil
+		}
+		a.appendHistogram(t, h)
+		return nil, false, a, nil
+	}
+	// Adding gauge histogram.
+	pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
+	if !okToAppend {
+		if appendOnly {
+			return nil, false, a, errors.New("gauge histogram schema change")
+		}
+		newChunk := NewHistogramChunk()
+		app, err := newChunk.Appender()
+		if err != nil {
+			panic(err) // This should never happen for an empty histogram chunk.
+		}
+		happ := app.(*HistogramAppender)
+		happ.setCounterResetHeader(GaugeType)
+		happ.appendHistogram(t, h)
+		return newChunk, false, app, nil
+	}
+
+	if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
+		if appendOnly {
+			return nil, false, a, fmt.Errorf("gauge histogram layout change with %d positive and %d negative backwards inserts", len(pBackwardInserts), len(nBackwardInserts))
+		}
+		h.PositiveSpans = pMergedSpans
+		h.NegativeSpans = nMergedSpans
+		a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
+	}
+
+	if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
+		if appendOnly {
+			return nil, false, a, fmt.Errorf("gauge histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
+		}
+		chk, app := a.recode(
+			pForwardInserts, nForwardInserts,
+			h.PositiveSpans, h.NegativeSpans,
+		)
+		app.(*HistogramAppender).appendHistogram(t, h)
+		return chk, true, app, nil
+	}
+
+	a.appendHistogram(t, h)
+	return nil, false, a, nil
+}
+
+func CounterResetHintToHeader(hint histogram.CounterResetHint) CounterResetHeader {
+	switch hint {
+	case histogram.CounterReset:
+		return CounterReset
+	case histogram.NotCounterReset:
+		return NotCounterReset
+	case histogram.GaugeType:
+		return GaugeType
+	default:
+		return UnknownCounterReset
+	}
+}
+
+type histogramIterator struct {
+	br       bstreamReader
+	numTotal uint16
+	numRead  uint16
+
+	counterResetHeader CounterResetHeader
+
+	// Layout:
+	schema         int32
+	zThreshold     float64
+	pSpans, nSpans []histogram.Span
+	customValues   []float64
+
+	// For the fields that are tracked as deltas and ultimately dod's.
+	t                            int64
+	cnt, zCnt                    uint64
+	tDelta, cntDelta, zCntDelta  int64
+	pBuckets, nBuckets           []int64   // Delta between buckets.
+	pFloatBuckets, nFloatBuckets []float64 // Absolute counts.
+	pBucketsDelta, nBucketsDelta []int64
+
+	// The sum is Gorilla xor encoded.
+	sum      float64
+	leading  uint8
+	trailing uint8
+
+	// Track calls to retrieve methods. Once they have been called, we
+	// cannot recycle the bucket slices anymore because we have returned
+	// them in the histogram.
+	atHistogramCalled, atFloatHistogramCalled bool
+
+	err error
+}
+
+func (it *histogramIterator) Seek(t int64) ValueType {
+	if it.err != nil {
+		return ValNone
+	}
+
+	for t > it.t || it.numRead == 0 {
+		if it.Next() == ValNone {
+			return ValNone
+		}
+	}
+	return ValHistogram
+}
+
+func (it *histogramIterator) At() (int64, float64) {
+	panic("cannot call histogramIterator.At")
+}
+
+func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
+	if value.IsStaleNaN(it.sum) {
+		return it.t, &histogram.Histogram{Sum: it.sum}
+	}
+	if h == nil {
+		it.atHistogramCalled = true
+		return it.t, &histogram.Histogram{
+			CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
+			Count:            it.cnt,
+			ZeroCount:        it.zCnt,
+			Sum:              it.sum,
+			ZeroThreshold:    it.zThreshold,
+			Schema:           it.schema,
+			PositiveSpans:    it.pSpans,
+			NegativeSpans:    it.nSpans,
+			PositiveBuckets:  it.pBuckets,
+			NegativeBuckets:  it.nBuckets,
+			CustomValues:     it.customValues,
+		}
+	}
+
+	h.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
+	h.Schema = it.schema
+	h.ZeroThreshold = it.zThreshold
+	h.ZeroCount = it.zCnt
+	h.Count = it.cnt
+	h.Sum = it.sum
+
+	h.PositiveSpans = resize(h.PositiveSpans, len(it.pSpans))
+	copy(h.PositiveSpans, it.pSpans)
+
+	h.NegativeSpans = resize(h.NegativeSpans, len(it.nSpans))
+	copy(h.NegativeSpans, it.nSpans)
+
+	h.PositiveBuckets = resize(h.PositiveBuckets, len(it.pBuckets))
+	copy(h.PositiveBuckets, it.pBuckets)
+
+	h.NegativeBuckets = resize(h.NegativeBuckets, len(it.nBuckets))
+	copy(h.NegativeBuckets, it.nBuckets)
+
+	h.CustomValues = resize(h.CustomValues, len(it.customValues))
+	copy(h.CustomValues, it.customValues)
+
+	return it.t, h
+}
+
+func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	if value.IsStaleNaN(it.sum) {
+		return it.t, &histogram.FloatHistogram{Sum: it.sum}
+	}
+	if fh == nil {
+		it.atFloatHistogramCalled = true
+		return it.t, &histogram.FloatHistogram{
+			CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
+			Count:            float64(it.cnt),
+			ZeroCount:        float64(it.zCnt),
+			Sum:              it.sum,
+			ZeroThreshold:    it.zThreshold,
+			Schema:           it.schema,
+			PositiveSpans:    it.pSpans,
+			NegativeSpans:    it.nSpans,
+			PositiveBuckets:  it.pFloatBuckets,
+			NegativeBuckets:  it.nFloatBuckets,
+			CustomValues:     it.customValues,
+		}
+	}
+
+	fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
+	fh.Schema = it.schema
+	fh.ZeroThreshold = it.zThreshold
+	fh.ZeroCount = float64(it.zCnt)
+	fh.Count = float64(it.cnt)
+	fh.Sum = it.sum
+
+	fh.PositiveSpans = resize(fh.PositiveSpans, len(it.pSpans))
+	copy(fh.PositiveSpans, it.pSpans)
+
+	fh.NegativeSpans = resize(fh.NegativeSpans, len(it.nSpans))
+	copy(fh.NegativeSpans, it.nSpans)
+
+	fh.PositiveBuckets = resize(fh.PositiveBuckets, len(it.pBuckets))
+	var currentPositive float64
+	for i, b := range it.pBuckets {
+		currentPositive += float64(b)
+		fh.PositiveBuckets[i] = currentPositive
+	}
+
+	fh.NegativeBuckets = resize(fh.NegativeBuckets, len(it.nBuckets))
+	var currentNegative float64
+	for i, b := range it.nBuckets {
+		currentNegative += float64(b)
+		fh.NegativeBuckets[i] = currentNegative
+	}
+
+	fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
+	copy(fh.CustomValues, it.customValues)
+
+	return it.t, fh
+}
+
+func (it *histogramIterator) AtT() int64 {
+	return it.t
+}
+
+func (it *histogramIterator) Err() error {
+	return it.err
+}
+
+func (it *histogramIterator) Reset(b []byte) {
+	// The first 3 bytes contain chunk headers.
+	// We skip that for actual samples.
+	it.br = newBReader(b[3:])
+	it.numTotal = binary.BigEndian.Uint16(b)
+	it.numRead = 0
+
+	it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+
+	it.t, it.cnt, it.zCnt = 0, 0, 0
+	it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0
+
+	// Recycle slices that have not been returned yet. Otherwise, start from
+	// scratch.
+	if it.atHistogramCalled {
+		it.atHistogramCalled = false
+		it.pBuckets, it.nBuckets = nil, nil
+		it.pSpans, it.nSpans = nil, nil
+		it.customValues = nil
+	} else {
+		it.pBuckets = it.pBuckets[:0]
+		it.nBuckets = it.nBuckets[:0]
+	}
+	if it.atFloatHistogramCalled {
+		it.atFloatHistogramCalled = false
+		it.pFloatBuckets, it.nFloatBuckets = nil, nil
+		it.customValues = nil
+	} else {
+		it.pFloatBuckets = it.pFloatBuckets[:0]
+		it.nFloatBuckets = it.nFloatBuckets[:0]
+	}
+
+	it.pBucketsDelta = it.pBucketsDelta[:0]
+	it.nBucketsDelta = it.nBucketsDelta[:0]
+
+	it.sum = 0
+	it.leading = 0
+	it.trailing = 0
+	it.err = nil
+}
+
+func (it *histogramIterator) Next() ValueType {
+	if it.err != nil || it.numRead == it.numTotal {
+		return ValNone
+	}
+
+	if it.numRead == 0 {
+		// The first read is responsible for reading the chunk layout
+		// and for initializing fields that depend on it. We give
+		// counter reset info at chunk level, hence we discard it here.
+		schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.schema = schema
+		it.zThreshold = zeroThreshold
+		it.pSpans, it.nSpans = posSpans, negSpans
+		it.customValues = customValues
+		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
+		// The code below recycles existing slices in case this iterator
+		// was reset and already has slices of a sufficient capacity.
+		if numPBuckets > 0 {
+			it.pBuckets = append(it.pBuckets, make([]int64, numPBuckets)...)
+			it.pBucketsDelta = append(it.pBucketsDelta, make([]int64, numPBuckets)...)
+			it.pFloatBuckets = append(it.pFloatBuckets, make([]float64, numPBuckets)...)
+		}
+		if numNBuckets > 0 {
+			it.nBuckets = append(it.nBuckets, make([]int64, numNBuckets)...)
+			it.nBucketsDelta = append(it.nBucketsDelta, make([]int64, numNBuckets)...)
+			it.nFloatBuckets = append(it.nFloatBuckets, make([]float64, numNBuckets)...)
+		}
+
+		// Now read the actual data.
+		t, err := readVarbitInt(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.t = t
+
+		cnt, err := readVarbitUint(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.cnt = cnt
+
+		zcnt, err := readVarbitUint(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.zCnt = zcnt
+
+		sum, err := it.br.readBits(64)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.sum = math.Float64frombits(sum)
+
+		var current int64
+		for i := range it.pBuckets {
+			v, err := readVarbitInt(&it.br)
+			if err != nil {
+				it.err = err
+				return ValNone
+			}
+			it.pBuckets[i] = v
+			current += it.pBuckets[i]
+			it.pFloatBuckets[i] = float64(current)
+		}
+		current = 0
+		for i := range it.nBuckets {
+			v, err := readVarbitInt(&it.br)
+			if err != nil {
+				it.err = err
+				return ValNone
+			}
+			it.nBuckets[i] = v
+			current += it.nBuckets[i]
+			it.nFloatBuckets[i] = float64(current)
+		}
+
+		it.numRead++
+		return ValHistogram
+	}
+
+	// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
+	// so we don't need a separate single delta logic for the 2nd sample.
+
+	// Recycle bucket, span and custom value slices that have not been returned yet. Otherwise, copy them.
+	if it.atFloatHistogramCalled || it.atHistogramCalled {
+		if len(it.pSpans) > 0 {
+			newSpans := make([]histogram.Span, len(it.pSpans))
+			copy(newSpans, it.pSpans)
+			it.pSpans = newSpans
+		} else {
+			it.pSpans = nil
+		}
+		if len(it.nSpans) > 0 {
+			newSpans := make([]histogram.Span, len(it.nSpans))
+			copy(newSpans, it.nSpans)
+			it.nSpans = newSpans
+		} else {
+			it.nSpans = nil
+		}
+		if len(it.customValues) > 0 {
+			newCustomValues := make([]float64, len(it.customValues))
+			copy(newCustomValues, it.customValues)
+			it.customValues = newCustomValues
+		} else {
+			it.customValues = nil
+		}
+	}
+
+	if it.atHistogramCalled {
+		it.atHistogramCalled = false
+		if len(it.pBuckets) > 0 {
+			newBuckets := make([]int64, len(it.pBuckets))
+			copy(newBuckets, it.pBuckets)
+			it.pBuckets = newBuckets
+		} else {
+			it.pBuckets = nil
+		}
+		if len(it.nBuckets) > 0 {
+			newBuckets := make([]int64, len(it.nBuckets))
+			copy(newBuckets, it.nBuckets)
+			it.nBuckets = newBuckets
+		} else {
+			it.nBuckets = nil
+		}
+	}
+
+	// FloatBuckets are set from scratch, so simply create empty ones.
+	if it.atFloatHistogramCalled {
+		it.atFloatHistogramCalled = false
+		if len(it.pFloatBuckets) > 0 {
+			it.pFloatBuckets = make([]float64, len(it.pFloatBuckets))
+		} else {
+			it.pFloatBuckets = nil
+		}
+		if len(it.nFloatBuckets) > 0 {
+			it.nFloatBuckets = make([]float64, len(it.nFloatBuckets))
+		} else {
+			it.nFloatBuckets = nil
+		}
+	}
+
+	tDod, err := readVarbitInt(&it.br)
+	if err != nil {
+		it.err = err
+		return ValNone
+	}
+	it.tDelta += tDod
+	it.t += it.tDelta
+
+	cntDod, err := readVarbitInt(&it.br)
+	if err != nil {
+		it.err = err
+		return ValNone
+	}
+	it.cntDelta += cntDod
+	it.cnt = uint64(int64(it.cnt) + it.cntDelta)
+
+	zcntDod, err := readVarbitInt(&it.br)
+	if err != nil {
+		it.err = err
+		return ValNone
+	}
+	it.zCntDelta += zcntDod
+	it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta)
+
+	ok := it.readSum()
+	if !ok {
+		return ValNone
+	}
+
+	if value.IsStaleNaN(it.sum) {
+		it.numRead++
+		return ValHistogram
+	}
+
+	var current int64
+	for i := range it.pBuckets {
+		dod, err := readVarbitInt(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.pBucketsDelta[i] += dod
+		it.pBuckets[i] += it.pBucketsDelta[i]
+		current += it.pBuckets[i]
+		it.pFloatBuckets[i] = float64(current)
+	}
+
+	current = 0
+	for i := range it.nBuckets {
+		dod, err := readVarbitInt(&it.br)
+		if err != nil {
+			it.err = err
+			return ValNone
+		}
+		it.nBucketsDelta[i] += dod
+		it.nBuckets[i] += it.nBucketsDelta[i]
+		current += it.nBuckets[i]
+		it.nFloatBuckets[i] = float64(current)
+	}
+
+	it.numRead++
+	return ValHistogram
+}
+
+func (it *histogramIterator) readSum() bool {
+	err := xorRead(&it.br, &it.sum, &it.leading, &it.trailing)
+	if err != nil {
+		it.err = err
+		return false
+	}
+	return true
+}
+
+func resize[T any](items []T, n int) []T {
+	if cap(items) < n {
+		return make([]T, n)
+	}
+	return items[:n]
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bb31acf00c27e74bb83bab8640c1468c5d3b40d
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go
@@ -0,0 +1,635 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunkenc
+
+import (
+	"math"
+
+	"github.com/prometheus/prometheus/model/histogram"
+)
+
+func writeHistogramChunkLayout(
+	b *bstream, schema int32, zeroThreshold float64,
+	positiveSpans, negativeSpans []histogram.Span, customValues []float64,
+) {
+	putZeroThreshold(b, zeroThreshold)
+	putVarbitInt(b, int64(schema))
+	putHistogramChunkLayoutSpans(b, positiveSpans)
+	putHistogramChunkLayoutSpans(b, negativeSpans)
+	if histogram.IsCustomBucketsSchema(schema) {
+		putHistogramChunkLayoutCustomBounds(b, customValues)
+	}
+}
+
+func readHistogramChunkLayout(b *bstreamReader) (
+	schema int32, zeroThreshold float64,
+	positiveSpans, negativeSpans []histogram.Span,
+	customValues []float64,
+	err error,
+) {
+	zeroThreshold, err = readZeroThreshold(b)
+	if err != nil {
+		return
+	}
+
+	v, err := readVarbitInt(b)
+	if err != nil {
+		return
+	}
+	schema = int32(v)
+
+	positiveSpans, err = readHistogramChunkLayoutSpans(b)
+	if err != nil {
+		return
+	}
+
+	negativeSpans, err = readHistogramChunkLayoutSpans(b)
+	if err != nil {
+		return
+	}
+
+	if histogram.IsCustomBucketsSchema(schema) {
+		customValues, err = readHistogramChunkLayoutCustomBounds(b)
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func putHistogramChunkLayoutSpans(b *bstream, spans []histogram.Span) {
+	putVarbitUint(b, uint64(len(spans)))
+	for _, s := range spans {
+		putVarbitUint(b, uint64(s.Length))
+		putVarbitInt(b, int64(s.Offset))
+	}
+}
+
+func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
+	var spans []histogram.Span
+	num, err := readVarbitUint(b)
+	if err != nil {
+		return nil, err
+	}
+	for i := 0; i < int(num); i++ {
+		length, err := readVarbitUint(b)
+		if err != nil {
+			return nil, err
+		}
+
+		offset, err := readVarbitInt(b)
+		if err != nil {
+			return nil, err
+		}
+
+		spans = append(spans, histogram.Span{
+			Length: uint32(length),
+			Offset: int32(offset),
+		})
+	}
+	return spans, nil
+}
+
+func putHistogramChunkLayoutCustomBounds(b *bstream, customValues []float64) {
+	putVarbitUint(b, uint64(len(customValues)))
+	for _, bound := range customValues {
+		putCustomBound(b, bound)
+	}
+}
+
+func readHistogramChunkLayoutCustomBounds(b *bstreamReader) ([]float64, error) {
+	var customValues []float64
+	num, err := readVarbitUint(b)
+	if err != nil {
+		return nil, err
+	}
+	for i := 0; i < int(num); i++ {
+		bound, err := readCustomBound(b)
+		if err != nil {
+			return nil, err
+		}
+
+		customValues = append(customValues, bound)
+	}
+	return customValues, nil
+}
+
+// putZeroThreshold writes the zero threshold to the bstream. It stores typical
+// values in just one byte, but needs 9 bytes for other values. In detail:
+//   - If the threshold is 0, store a single zero byte.
+//   - If the threshold is a power of 2 between (and including) 2^-243 and 2^10,
+//     take the exponent from the IEEE 754 representation of the threshold, which
+//     covers a range between (and including) -242 and 11. (2^-243 is 0.5*2^-242
+//     in IEEE 754 representation, and 2^10 is 0.5*2^11.) Add 243 to the exponent
+//     and store the result (which will be between 1 and 254) as a single
+//     byte. Note that small powers of two are preferred values for the zero
+//     threshold. The default value for the zero threshold is 2^-128 (or
+//     0.5*2^-127 in IEEE 754 representation) and will therefore be encoded as a
+//     single byte (with value 116).
+//   - In all other cases, store 255 as a single byte, followed by the 8 bytes of
+//     the threshold as a float64, i.e. taking 9 bytes in total.
+func putZeroThreshold(b *bstream, threshold float64) {
+	if threshold == 0 {
+		b.writeByte(0)
+		return
+	}
+	frac, exp := math.Frexp(threshold)
+	if frac != 0.5 || exp < -242 || exp > 11 {
+		b.writeByte(255)
+		b.writeBits(math.Float64bits(threshold), 64)
+		return
+	}
+	b.writeByte(byte(exp + 243))
+}
+
+// readZeroThreshold reads the zero threshold written with putZeroThreshold.
+func readZeroThreshold(br *bstreamReader) (float64, error) {
+	b, err := br.ReadByte()
+	if err != nil {
+		return 0, err
+	}
+	switch b {
+	case 0:
+		return 0, nil
+	case 255:
+		v, err := br.readBits(64)
+		if err != nil {
+			return 0, err
+		}
+		return math.Float64frombits(v), nil
+	default:
+		return math.Ldexp(0.5, int(b)-243), nil
+	}
+}
+
+// isWholeWhenMultiplied reports whether the number, multiplied by 1000, can be
+// converted to an integer without losing precision.
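+// For example, 0.001 multiplied by 1000 rounds to 1 and 1/1000 == 0.001, so the
+// result is true; 0.0001 multiplied by 1000 rounds to 0 and 0/1000 != 0.0001,
+// so the result is false.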
+func isWholeWhenMultiplied(in float64) bool {
+	i := uint(math.Round(in * 1000))
+	out := float64(i) / 1000
+	return in == out
+}
+
+// putCustomBound writes a custom bound to the bstream. It stores values from
+// 0 to 33554.430 (inclusive) that are multiples of 0.001 in unsigned varbit
+// encoding of up to 4 bytes, but needs 1 bit + 8 bytes for other values like
+// negative numbers, numbers greater than 33554.430, or numbers that are not
+// a multiple of 0.001, on the assumption that they are less common. In detail:
+//   - Multiply the bound by 1000, without rounding.
+//   - If the multiplied bound is >= 0, <= 33554430 and a whole number,
+//     add 1 and store it in unsigned varbit encoding. All these numbers are
+//     greater than 0, so the leading bit of the varbit is always 1!
+//   - Otherwise, store a 0 bit, followed by the 8 bytes of the original
+//     bound as a float64.
+//
+// When reading the values, we can first decode a value as unsigned varbit,
+// if it's 0, then we read the next 8 bytes as a float64, otherwise
+// we can convert the value to a float64 by subtracting 1 and dividing by 1000.
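+//
+// For example, the bound 0.5 becomes 500 when multiplied by 1000, so it is
+// stored as the varbit-encoded value 501; decoding computes (501-1)/1000 = 0.5.
+// A negative bound like -1 falls outside the varbit range and is stored as a
+// zero bit followed by the 8-byte float64 representation.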
+func putCustomBound(b *bstream, f float64) {
+	tf := f * 1000
+	// 33554430 is one less than 33554431, the maximum that fits in a 4-byte
+	// varbit, because the encoding below adds 1 to the stored value. Other
+	// values are stored in 8 bytes anyway.
+	if tf < 0 || tf > 33554430 || !isWholeWhenMultiplied(f) {
+		b.writeBit(zero)
+		b.writeBits(math.Float64bits(f), 64)
+		return
+	}
+	putVarbitUint(b, uint64(math.Round(tf))+1)
+}
+
+// readCustomBound reads the custom bound written with putCustomBound.
+func readCustomBound(br *bstreamReader) (float64, error) {
+	b, err := readVarbitUint(br)
+	if err != nil {
+		return 0, err
+	}
+	switch b {
+	case 0:
+		v, err := br.readBits(64)
+		if err != nil {
+			return 0, err
+		}
+		return math.Float64frombits(v), nil
+	default:
+		return float64(b-1) / 1000, nil
+	}
+}
+
+type bucketIterator struct {
+	spans  []histogram.Span
+	span   int // Span position of last yielded bucket.
+	bucket int // Bucket position within span of last yielded bucket.
+	idx    int // Bucket index (globally across all spans) of last yielded bucket.
+}
+
+func newBucketIterator(spans []histogram.Span) *bucketIterator {
+	b := bucketIterator{
+		spans:  spans,
+		span:   0,
+		bucket: -1,
+		idx:    -1,
+	}
+	if len(spans) > 0 {
+		b.idx += int(spans[0].Offset)
+	}
+	return &b
+}
+
+func (b *bucketIterator) Next() (int, bool) {
+	// We're already out of bounds.
+	if b.span >= len(b.spans) {
+		return 0, false
+	}
+	if b.bucket < int(b.spans[b.span].Length)-1 { // Try to move within same span.
+		b.bucket++
+		b.idx++
+		return b.idx, true
+	}
+
+	for b.span < len(b.spans)-1 { // Try to move from one span to the next.
+		b.span++
+		b.idx += int(b.spans[b.span].Offset + 1)
+		b.bucket = 0
+		if b.spans[b.span].Length == 0 {
+			b.idx--
+			continue
+		}
+		return b.idx, true
+	}
+
+	// We're out of options.
+	return 0, false
+}
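+
+// For illustration: the spans [{Offset: 0, Length: 2}, {Offset: 3, Length: 2}]
+// describe buckets at absolute indices 0, 1, 5, and 6, which is exactly the
+// sequence of indices a bucketIterator over these spans yields from Next.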
+
+// An Insert describes how many new buckets have to be inserted before
+// processing the pos'th bucket from the original slice.
+type Insert struct {
+	pos int
+	num int
+
+	// Optional: bucketIdx is the index of the bucket that is inserted.
+	// Can be used to adjust spans.
+	bucketIdx int
+}
+
+// Deprecated: Use expandIntSpansAndBuckets or expandFloatSpansAndBuckets
+// instead. expandSpansForward is left here for reference.
+// expandSpansForward returns the inserts to expand the bucket spans 'a' so that
+// they match the spans in 'b'. 'b' must cover the same or more buckets than
+// 'a', otherwise the function will return false.
+//
+// Example:
+//
+// Let's say the old buckets look like this:
+//
+//	span syntax: [offset, length]
+//	spans      : [ 0 , 2 ]               [2,1]                   [ 3 , 2 ]                     [3,1]       [1,1]
+//	bucket idx : [0]   [1]    2     3    [4]    5     6     7    [8]   [9]    10    11    12   [13]   14   [15]
+//	raw values    6     3                 3                       2     4                       5           1
+//	deltas        6    -3                 0                      -1     2                       1          -4
+//
+// But now we introduce a new bucket layout. (Carefully chosen example where we
+// have a span appended, one unchanged[*], one prepended, and two merged - in
+// that order.)
+//
+// [*] unchanged in terms of which bucket indices they represent, but to achieve
+// that, their offset needs to change if "disrupted" by spans changing ahead of
+// them.
+//
+//	                                      \/ this one is "unchanged"
+//	spans      : [  0  ,  3    ]         [1,1]       [    1    ,   4     ]                     [  3  ,   3    ]
+//	bucket idx : [0]   [1]   [2]    3    [4]    5    [6]   [7]   [8]   [9]    10    11    12   [13]  [14]  [15]
+//	raw values    6     3     0           3           0     0     2     4                       5     0     1
+//	deltas        6    -3    -3           3          -3     0     2     2                       1    -5     1
+//	delta mods:                          / \                     / \                                       / \
+//
+// Note for histograms with delta-encoded buckets: Whenever any new buckets are
+// introduced, the subsequent "old" bucket needs to readjust its delta to the
+// new base of 0. Thus, for the caller who wants to transform the set of
+// original deltas to a new set of deltas to match a new span layout that adds
+// buckets, we simply need to generate a list of inserts.
+//
+// Note: Within expandSpansForward we don't have to worry about the changes to the
+// spans themselves, thanks to the iterators we get to work with the more useful
+// bucket indices (which of course directly correspond to the buckets we have to
+// adjust).
+func expandSpansForward(a, b []histogram.Span) (forward []Insert, ok bool) {
+	ai := newBucketIterator(a)
+	bi := newBucketIterator(b)
+
+	var inserts []Insert
+
+	// When inter.num becomes > 0, this becomes a valid insert that should
+	// be yielded when we finish a streak of new buckets.
+	var inter Insert
+
+	av, aOK := ai.Next()
+	bv, bOK := bi.Next()
+loop:
+	for {
+		switch {
+		case aOK && bOK:
+			switch {
+			case av == bv: // Both have an identical value. Move on!
+				// Finish WIP insert and reset.
+				if inter.num > 0 {
+					inserts = append(inserts, inter)
+				}
+				inter.num = 0
+				av, aOK = ai.Next()
+				bv, bOK = bi.Next()
+				inter.pos++
+			case av < bv: // b misses a value that is in a.
+				return inserts, false
+			case av > bv: // a misses a value that is in b. Forward b and recompare.
+				inter.num++
+				bv, bOK = bi.Next()
+			}
+		case aOK && !bOK: // b misses a value that is in a.
+			return inserts, false
+		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
+			inter.num++
+			bv, bOK = bi.Next()
+		default: // Both iterators ran out. We're done.
+			if inter.num > 0 {
+				inserts = append(inserts, inter)
+			}
+			break loop
+		}
+	}
+
+	return inserts, true
+}
+
+// expandSpansBothWays is similar to expandSpansForward, but now b may also
+// cover an entirely different set of buckets. The function returns the
+// “forward” inserts to expand 'a' to also cover all the buckets exclusively
+// covered by 'b', and it returns the “backward” inserts to expand 'b' to also
+// cover all the buckets exclusively covered by 'a'.
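+//
+// For example, if 'a' covers buckets {0, 1} and 'b' covers buckets {1, 2}, the
+// forward inserts ask for one new bucket before position 2 of a's buckets (to
+// add bucket 2), the backward inserts ask for one new bucket before position 0
+// of b's buckets (to add bucket 0), and the merged spans cover buckets
+// {0, 1, 2}.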
+func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) {
+	ai := newBucketIterator(a)
+	bi := newBucketIterator(b)
+
+	var fInserts, bInserts []Insert
+	var lastBucket int
+	addBucket := func(b int) {
+		offset := b - lastBucket - 1
+		if offset == 0 && len(mergedSpans) > 0 {
+			mergedSpans[len(mergedSpans)-1].Length++
+		} else {
+			if len(mergedSpans) == 0 {
+				offset++
+			}
+			mergedSpans = append(mergedSpans, histogram.Span{
+				Offset: int32(offset),
+				Length: 1,
+			})
+		}
+
+		lastBucket = b
+	}
+
+	// When fInter.num (or bInter.num, respectively) becomes > 0, this
+	// becomes a valid insert that should be yielded when we finish a streak
+	// of new buckets.
+	var fInter, bInter Insert
+
+	av, aOK := ai.Next()
+	bv, bOK := bi.Next()
+loop:
+	for {
+		switch {
+		case aOK && bOK:
+			switch {
+			case av == bv: // Both have an identical value. Move on!
+				// Finish WIP insert and reset.
+				if fInter.num > 0 {
+					fInserts = append(fInserts, fInter)
+					fInter.num = 0
+				}
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+				addBucket(av)
+				av, aOK = ai.Next()
+				bv, bOK = bi.Next()
+				fInter.pos++
+				bInter.pos++
+			case av < bv: // b misses a value that is in a.
+				bInter.num++
+				// Collect the forward inserts before advancing
+				// the position of 'a'.
+				if fInter.num > 0 {
+					fInserts = append(fInserts, fInter)
+					fInter.num = 0
+				}
+				addBucket(av)
+				fInter.pos++
+				av, aOK = ai.Next()
+			case av > bv: // a misses a value that is in b. Forward b and recompare.
+				fInter.num++
+				// Collect the backward inserts before advancing the
+				// position of 'b'.
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+				addBucket(bv)
+				bInter.pos++
+				bv, bOK = bi.Next()
+			}
+		case aOK && !bOK: // b misses a value that is in a.
+			bInter.num++
+			addBucket(av)
+			av, aOK = ai.Next()
+		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
+			fInter.num++
+			addBucket(bv)
+			bv, bOK = bi.Next()
+		default: // Both iterators ran out. We're done.
+			if fInter.num > 0 {
+				fInserts = append(fInserts, fInter)
+			}
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+			}
+			break loop
+		}
+	}
+
+	return fInserts, bInserts, mergedSpans
+}
+
+type bucketValue interface {
+	int64 | float64
+}
+
+// insert merges 'in' with the provided inserts and writes them into 'out',
+// which must already have the appropriate length. 'out' is also returned for
+// convenience.
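+//
+// A worked example with deltas==true: for in = [6, -3] (absolute bucket values
+// 6 and 3) and a single Insert{pos: 1, num: 1}, the output is [6, -6, 3],
+// i.e. absolute values 6, 0, 3: the new bucket holds 0, and the deltas around
+// it are adjusted so the original values are preserved.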
+func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV {
+	var (
+		oi int // Position in out.
+		v  BV  // The last value seen.
+		ii int // The next insert to process.
+	)
+	for i, d := range in {
+		if ii < len(inserts) && i == inserts[ii].pos {
+			// We have an insert!
+			// Add insert.num new delta values such that their
+			// bucket values equate 0. When deltas==false, it means
+			// that it is an absolute value. So we set it to 0
+			// directly.
+			if deltas {
+				out[oi] = -v
+			} else {
+				out[oi] = 0
+			}
+			oi++
+			for x := 1; x < inserts[ii].num; x++ {
+				out[oi] = 0
+				oi++
+			}
+			ii++
+
+			// Now save the value from the input. The delta value we
+			// should save is the original delta value + the last
+			// value of the point before the insert (to undo the
+			// delta that was introduced by the insert). When
+			// deltas==false, it means that it is an absolute value,
+			// so we set it directly to the value in the 'in' slice.
+			if deltas {
+				out[oi] = d + v
+			} else {
+				out[oi] = d
+			}
+			oi++
+			v = d + v
+			continue
+		}
+		// If there was no insert, the original delta is still valid.
+		out[oi] = d
+		oi++
+		v += d
+	}
+	switch ii {
+	case len(inserts):
+		// All inserts processed. Nothing more to do.
+	case len(inserts) - 1:
+		// One more insert to process at the end.
+		if deltas {
+			out[oi] = -v
+		} else {
+			out[oi] = 0
+		}
+		oi++
+		for x := 1; x < inserts[ii].num; x++ {
+			out[oi] = 0
+			oi++
+		}
+	default:
+		panic("unprocessed inserts left")
+	}
+	return out
+}
+
+// counterResetHint returns a CounterResetHint based on the CounterResetHeader
+// and on the position into the chunk.
+func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterResetHint {
+	switch {
+	case crh == GaugeType:
+		// A gauge histogram chunk only contains gauge histograms.
+		return histogram.GaugeType
+	case numRead > 1:
+		// In a counter histogram chunk, there will not be any counter
+		// resets after the first histogram.
+		return histogram.NotCounterReset
+	default:
+		// Sadly, we have to return "unknown" as the hint for all other
+		// cases, even if we know that the chunk was started with or without a
+		// counter reset. But we cannot be sure that the previous chunk
+		// still exists in the TSDB, or if the previous chunk was added later
+		// by out-of-order ingestion or backfill, so we conservatively return "unknown".
+		//
+		// TODO: If we can detect whether the previous and current chunk are
+		// actually consecutive then we could trust its hint:
+		// https://github.com/prometheus/prometheus/issues/15346.
+		return histogram.UnknownCounterReset
+	}
+}
+
+// adjustForInserts adjusts the spans for the given inserts.
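+//
+// For example, the spans [{Offset: 0, Length: 2}] (buckets 0 and 1) with a
+// single Insert{num: 1, bucketIdx: 3} become
+// [{Offset: 0, Length: 2}, {Offset: 1, Length: 1}], now also covering bucket 3.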
+func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []histogram.Span) {
+	if len(inserts) == 0 {
+		return spans
+	}
+
+	it := newBucketIterator(spans)
+
+	var (
+		lastBucket int
+		i          int
+		insertIdx  = inserts[i].bucketIdx
+		insertNum  = inserts[i].num
+	)
+
+	addBucket := func(b int) {
+		offset := b - lastBucket - 1
+		if offset == 0 && len(mergedSpans) > 0 {
+			mergedSpans[len(mergedSpans)-1].Length++
+		} else {
+			if len(mergedSpans) == 0 {
+				offset++
+			}
+			mergedSpans = append(mergedSpans, histogram.Span{
+				Offset: int32(offset),
+				Length: 1,
+			})
+		}
+
+		lastBucket = b
+	}
+	consumeInsert := func() {
+		// Consume the insert.
+		insertNum--
+		if insertNum == 0 {
+			i++
+			if i < len(inserts) {
+				insertIdx = inserts[i].bucketIdx
+				insertNum = inserts[i].num
+			}
+		} else {
+			insertIdx++
+		}
+	}
+
+	bucket, ok := it.Next()
+	for ok {
+		if i < len(inserts) && insertIdx < bucket {
+			addBucket(insertIdx)
+			consumeInsert()
+		} else {
+			addBucket(bucket)
+			bucket, ok = it.Next()
+		}
+	}
+	for i < len(inserts) {
+		addBucket(inserts[i].bucketIdx)
+		consumeInsert()
+	}
+	return
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go
new file mode 100644
index 0000000000000000000000000000000000000000..574edec48b38bf689f65e89e736249d78ab40fef
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go
@@ -0,0 +1,231 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunkenc
+
+import (
+	"fmt"
+	"math/bits"
+)
+
+// putVarbitInt writes an int64 using varbit encoding with a bit bucketing
+// optimized for the dod's observed in histogram buckets, plus a few additional
+// buckets for large numbers.
+//
+// For optimal space utilization, each branch wouldn't need to support any
+// values already covered by the prior branches, so we could expand the range of
+// each branch and do more with fewer bits. That would come at the price of more
+// expensive encoding and decoding (cutting out and later adding back the
+// center piece we skip). With the distributions of values seen in practice, we
+// would reduce the size by only around 1%. A more detailed study would be
+// needed for precise values, but it appears quite certain that we would end up
+// far below the ~10% savings that might have justified the increased
+// coding/decoding cost.
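+//
+// A worked example: val = -3 falls into the bitRange(val, 3) bucket, so the
+// prefix 0b10 is written, followed by the lowest 3 bits of uint64(-3), i.e.
+// 0b101 = 5. On reading, readVarbitInt sees 5 > 1<<(3-1) and subtracts 1<<3 to
+// recover -3, for a total of 5 bits on the wire.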
+func putVarbitInt(b *bstream, val int64) {
+	switch {
+	case val == 0: // Precisely 0, needs 1 bit.
+		b.writeBit(zero)
+	case bitRange(val, 3): // -3 <= val <= 4, needs 5 bits.
+		b.writeBits(0b10, 2)
+		b.writeBits(uint64(val), 3)
+	case bitRange(val, 6): // -31 <= val <= 32, 9 bits.
+		b.writeBits(0b110, 3)
+		b.writeBits(uint64(val), 6)
+	case bitRange(val, 9): // -255 <= val <= 256, 13 bits.
+		b.writeBits(0b1110, 4)
+		b.writeBits(uint64(val), 9)
+	case bitRange(val, 12): // -2047 <= val <= 2048, 17 bits.
+		b.writeBits(0b11110, 5)
+		b.writeBits(uint64(val), 12)
+	case bitRange(val, 18): // -131071 <= val <= 131072, 3 bytes.
+		b.writeBits(0b111110, 6)
+		b.writeBits(uint64(val), 18)
+	case bitRange(val, 25): // -16777215 <= val <= 16777216, 4 bytes.
+		b.writeBits(0b1111110, 7)
+		b.writeBits(uint64(val), 25)
+	case bitRange(val, 56): // -36028797018963967 <= val <= 36028797018963968, 8 bytes.
+		b.writeBits(0b11111110, 8)
+		b.writeBits(uint64(val), 56)
+	default:
+		b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
+		b.writeBits(uint64(val), 64)
+	}
+}
+
+// readVarbitInt reads an int64 encoded with putVarbitInt.
+func readVarbitInt(b *bstreamReader) (int64, error) {
+	var d byte
+	for i := 0; i < 8; i++ {
+		d <<= 1
+		bit, err := b.readBitFast()
+		if err != nil {
+			bit, err = b.readBit()
+		}
+		if err != nil {
+			return 0, err
+		}
+		if bit == zero {
+			break
+		}
+		d |= 1
+	}
+
+	var val int64
+	var sz uint8
+
+	switch d {
+	case 0b0:
+		// val == 0
+	case 0b10:
+		sz = 3
+	case 0b110:
+		sz = 6
+	case 0b1110:
+		sz = 9
+	case 0b11110:
+		sz = 12
+	case 0b111110:
+		sz = 18
+	case 0b1111110:
+		sz = 25
+	case 0b11111110:
+		sz = 56
+	case 0b11111111:
+		// Do not use fast because it's very unlikely it will succeed.
+		bits, err := b.readBits(64)
+		if err != nil {
+			return 0, err
+		}
+
+		val = int64(bits)
+	default:
+		return 0, fmt.Errorf("invalid bit pattern %b", d)
+	}
+
+	if sz != 0 {
+		bits, err := b.readBitsFast(sz)
+		if err != nil {
+			bits, err = b.readBits(sz)
+		}
+		if err != nil {
+			return 0, err
+		}
+		if bits > (1 << (sz - 1)) {
+			// Account for negative numbers, which come back as
+			// high unsigned numbers. See docs/bstream.md.
+			bits -= (1 << sz)
+		}
+		val = int64(bits)
+	}
+
+	return val, nil
+}
+
+func bitRangeUint(x uint64, nbits int) bool {
+	return bits.LeadingZeros64(x) >= 64-nbits
+}
+
+// putVarbitUint writes a uint64 using varbit encoding. It uses the same bit
+// buckets as putVarbitInt.
+func putVarbitUint(b *bstream, val uint64) {
+	switch {
+	case val == 0: // Precisely 0, needs 1 bit.
+		b.writeBit(zero)
+	case bitRangeUint(val, 3): // val <= 7, needs 5 bits.
+		b.writeBits(0b10, 2)
+		b.writeBits(val, 3)
+	case bitRangeUint(val, 6): // val <= 63, 9 bits.
+		b.writeBits(0b110, 3)
+		b.writeBits(val, 6)
+	case bitRangeUint(val, 9): // val <= 511, 13 bits.
+		b.writeBits(0b1110, 4)
+		b.writeBits(val, 9)
+	case bitRangeUint(val, 12): // val <= 4095, 17 bits.
+		b.writeBits(0b11110, 5)
+		b.writeBits(val, 12)
+	case bitRangeUint(val, 18): // val <= 262143, 3 bytes.
+		b.writeBits(0b111110, 6)
+		b.writeBits(val, 18)
+	case bitRangeUint(val, 25): // val <= 33554431, 4 bytes.
+		b.writeBits(0b1111110, 7)
+		b.writeBits(val, 25)
+	case bitRangeUint(val, 56): // val <= 72057594037927935, 8 bytes.
+		b.writeBits(0b11111110, 8)
+		b.writeBits(val, 56)
+	default:
+		b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
+		b.writeBits(val, 64)
+	}
+}
+
+// readVarbitUint reads a uint64 encoded with putVarbitUint.
+func readVarbitUint(b *bstreamReader) (uint64, error) {
+	var d byte
+	for i := 0; i < 8; i++ {
+		d <<= 1
+		bit, err := b.readBitFast()
+		if err != nil {
+			bit, err = b.readBit()
+		}
+		if err != nil {
+			return 0, err
+		}
+		if bit == zero {
+			break
+		}
+		d |= 1
+	}
+
+	var (
+		bits uint64
+		sz   uint8
+		err  error
+	)
+
+	switch d {
+	case 0b0:
+		// val == 0
+	case 0b10:
+		sz = 3
+	case 0b110:
+		sz = 6
+	case 0b1110:
+		sz = 9
+	case 0b11110:
+		sz = 12
+	case 0b111110:
+		sz = 18
+	case 0b1111110:
+		sz = 25
+	case 0b11111110:
+		sz = 56
+	case 0b11111111:
+		// Do not use fast because it's very unlikely it will succeed.
+		bits, err = b.readBits(64)
+		if err != nil {
+			return 0, err
+		}
+	default:
+		return 0, fmt.Errorf("invalid bit pattern %b", d)
+	}
+
+	if sz != 0 {
+		bits, err = b.readBitsFast(sz)
+		if err != nil {
+			bits, err = b.readBits(sz)
+		}
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	return bits, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go
index e3b4f58b2ad0679c856facc1f4b805ea5e3fb5da..ac75a5994bbdeb5e8ce68c476b2af45dd848f7e1 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go
@@ -14,7 +14,7 @@
 // The code in this file was largely written by Damian Gryski as part of
 // https://github.com/dgryski/go-tsz and published under the license below.
 // It was modified to accommodate reading from byte slices without modifying
-// the underlying bytes, which would panic when reading from mmap'd
+// the underlying bytes, which would panic when reading from mmapped
 // read-only byte slices.
 
 // Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
@@ -47,6 +47,8 @@ import (
 	"encoding/binary"
 	"math"
 	"math/bits"
+
+	"github.com/prometheus/prometheus/model/histogram"
 )
 
 const (
@@ -58,12 +60,16 @@ type XORChunk struct {
 	b bstream
 }
 
-// NewXORChunk returns a new chunk with XOR encoding of the given size.
+// NewXORChunk returns a new chunk with XOR encoding.
 func NewXORChunk() *XORChunk {
 	b := make([]byte, 2, 128)
 	return &XORChunk{b: bstream{stream: b, count: 0}}
 }
 
+func (c *XORChunk) Reset(stream []byte) {
+	c.b.Reset(stream)
+}
+
 // Encoding returns the encoding type.
 func (c *XORChunk) Encoding() Encoding {
 	return EncXOR
@@ -79,6 +85,7 @@ func (c *XORChunk) NumSamples() int {
 	return int(binary.BigEndian.Uint16(c.Bytes()))
 }
 
+// Compact implements the Chunk interface.
 func (c *XORChunk) Compact() {
 	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
 		buf := make([]byte, l)
@@ -88,13 +95,15 @@ func (c *XORChunk) Compact() {
 }
 
 // Appender implements the Chunk interface.
+// It is not valid to call Appender() multiple times concurrently or to use multiple
+// Appenders on the same chunk.
 func (c *XORChunk) Appender() (Appender, error) {
 	it := c.iterator(nil)
 
 	// To get an appender we must know the state it would have if we had
 	// appended all existing data from scratch.
 	// We iterate through the end and populate via the iterator's state.
-	for it.Next() {
+	for it.Next() != ValNone {
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -108,16 +117,13 @@ func (c *XORChunk) Appender() (Appender, error) {
 		leading:  it.leading,
 		trailing: it.trailing,
 	}
-	if binary.BigEndian.Uint16(a.b.bytes()) == 0 {
+	if it.numTotal == 0 {
 		a.leading = 0xff
 	}
 	return a, nil
 }
 
 func (c *XORChunk) iterator(it Iterator) *xorIterator {
-	// Should iterators guarantee to act on a copy of the data so it doesn't lock append?
-	// When using striped locks to guard access to chunks, probably yes.
-	// Could only copy data if the chunk is not completed yet.
 	if xorIter, ok := it.(*xorIterator); ok {
 		xorIter.Reset(c.b.bytes())
 		return xorIter
@@ -132,6 +138,9 @@ func (c *XORChunk) iterator(it Iterator) *xorIterator {
 }
 
 // Iterator implements the Chunk interface.
+// Iterator() must not be called concurrently with any modifications to the chunk,
+// but after it returns you can use an Iterator concurrently with an Appender or
+// other Iterators.
 func (c *XORChunk) Iterator(it Iterator) Iterator {
 	return c.iterator(it)
 }
@@ -150,15 +159,14 @@ type xorAppender struct {
 func (a *xorAppender) Append(t int64, v float64) {
 	var tDelta uint64
 	num := binary.BigEndian.Uint16(a.b.bytes())
-
-	if num == 0 {
+	switch num {
+	case 0:
 		buf := make([]byte, binary.MaxVarintLen64)
 		for _, b := range buf[:binary.PutVarint(buf, t)] {
 			a.b.writeByte(b)
 		}
 		a.b.writeBits(math.Float64bits(v), 64)
-
-	} else if num == 1 {
+	case 1:
 		tDelta = uint64(t - a.t)
 
 		buf := make([]byte, binary.MaxVarintLen64)
@@ -167,27 +175,32 @@ func (a *xorAppender) Append(t int64, v float64) {
 		}
 
 		a.writeVDelta(v)
-
-	} else {
+	default:
 		tDelta = uint64(t - a.t)
 		dod := int64(tDelta - a.tDelta)
 
 		// Gorilla has a max resolution of seconds, Prometheus milliseconds.
 		// Thus we use higher value range steps with larger bit size.
+		//
+		// TODO(beorn7): This seems to needlessly jump to large bit
+		// sizes even for very small deviations from zero. Timestamp
+		// compression can probably benefit from some smaller bit
+		// buckets. See also what was done for histogram encoding in
+		// varbit.go.
 		switch {
 		case dod == 0:
 			a.b.writeBit(zero)
 		case bitRange(dod, 14):
-			a.b.writeBits(0x02, 2) // '10'
-			a.b.writeBits(uint64(dod), 14)
+			a.b.writeByte(0b10<<6 | (uint8(dod>>8) & (1<<6 - 1))) // 0b10 size code combined with 6 bits of dod.
+			a.b.writeByte(uint8(dod))                             // Bottom 8 bits of dod.
 		case bitRange(dod, 17):
-			a.b.writeBits(0x06, 3) // '110'
+			a.b.writeBits(0b110, 3)
 			a.b.writeBits(uint64(dod), 17)
 		case bitRange(dod, 20):
-			a.b.writeBits(0x0e, 4) // '1110'
+			a.b.writeBits(0b1110, 4)
 			a.b.writeBits(uint64(dod), 20)
 		default:
-			a.b.writeBits(0x0f, 4) // '1111'
+			a.b.writeBits(0b1111, 4)
 			a.b.writeBits(uint64(dod), 64)
 		}
 
@@ -200,43 +213,22 @@ func (a *xorAppender) Append(t int64, v float64) {
 	a.tDelta = tDelta
 }
 
+// bitRange returns whether the given integer can be represented by nbits.
+// See docs/bstream.md.
 func bitRange(x int64, nbits uint8) bool {
 	return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1)
 }
 
 func (a *xorAppender) writeVDelta(v float64) {
-	vDelta := math.Float64bits(v) ^ math.Float64bits(a.v)
-
-	if vDelta == 0 {
-		a.b.writeBit(zero)
-		return
-	}
-	a.b.writeBit(one)
-
-	leading := uint8(bits.LeadingZeros64(vDelta))
-	trailing := uint8(bits.TrailingZeros64(vDelta))
-
-	// Clamp number of leading zeros to avoid overflow when encoding.
-	if leading >= 32 {
-		leading = 31
-	}
-
-	if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing {
-		a.b.writeBit(zero)
-		a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing))
-	} else {
-		a.leading, a.trailing = leading, trailing
+	xorWrite(a.b, v, a.v, &a.leading, &a.trailing)
+}
 
-		a.b.writeBit(one)
-		a.b.writeBits(uint64(leading), 5)
+func (a *xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
+	panic("appended a histogram sample to a float chunk")
+}
 
-		// Note that if leading == trailing == 0, then sigbits == 64.  But that value doesn't actually fit into the 6 bits we have.
-		// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).
-		// So instead we write out a 0 and adjust it back to 64 on unpacking.
-		sigbits := 64 - leading - trailing
-		a.b.writeBits(uint64(sigbits), 6)
-		a.b.writeBits(vDelta>>trailing, int(sigbits))
-	}
+func (a *xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
+	panic("appended a float histogram sample to a float chunk")
 }
 
 type xorIterator struct {
@@ -254,23 +246,35 @@ type xorIterator struct {
 	err    error
 }
 
-func (it *xorIterator) Seek(t int64) bool {
+func (it *xorIterator) Seek(t int64) ValueType {
 	if it.err != nil {
-		return false
+		return ValNone
 	}
 
 	for t > it.t || it.numRead == 0 {
-		if !it.Next() {
-			return false
+		if it.Next() == ValNone {
+			return ValNone
 		}
 	}
-	return true
+	return ValFloat
 }
 
 func (it *xorIterator) At() (int64, float64) {
 	return it.t, it.val
 }
 
+func (it *xorIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+	panic("cannot call xorIterator.AtHistogram")
+}
+
+func (it *xorIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	panic("cannot call xorIterator.AtFloatHistogram")
+}
+
+func (it *xorIterator) AtT() int64 {
+	return it.t
+}
+
 func (it *xorIterator) Err() error {
 	return it.err
 }
@@ -290,36 +294,36 @@ func (it *xorIterator) Reset(b []byte) {
 	it.err = nil
 }
 
-func (it *xorIterator) Next() bool {
+func (it *xorIterator) Next() ValueType {
 	if it.err != nil || it.numRead == it.numTotal {
-		return false
+		return ValNone
 	}
 
 	if it.numRead == 0 {
 		t, err := binary.ReadVarint(&it.br)
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 		v, err := it.br.readBits(64)
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 		it.t = t
 		it.val = math.Float64frombits(v)
 
 		it.numRead++
-		return true
+		return ValFloat
 	}
 	if it.numRead == 1 {
 		tDelta, err := binary.ReadUvarint(&it.br)
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 		it.tDelta = tDelta
-		it.t = it.t + int64(it.tDelta)
+		it.t += int64(it.tDelta)
 
 		return it.readValue()
 	}
@@ -334,7 +338,7 @@ func (it *xorIterator) Next() bool {
 		}
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 		if bit == zero {
 			break
@@ -344,20 +348,20 @@ func (it *xorIterator) Next() bool {
 	var sz uint8
 	var dod int64
 	switch d {
-	case 0x00:
+	case 0b0:
 		// dod == 0
-	case 0x02:
+	case 0b10:
 		sz = 14
-	case 0x06:
+	case 0b110:
 		sz = 17
-	case 0x0e:
+	case 0b1110:
 		sz = 20
-	case 0x0f:
+	case 0b1111:
 		// Do not use fast because it's very unlikely it will succeed.
 		bits, err := it.br.readBits(64)
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 
 		dod = int64(bits)
@@ -370,86 +374,137 @@ func (it *xorIterator) Next() bool {
 		}
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
+
+		// Account for negative numbers, which come back as high unsigned numbers.
+		// See docs/bstream.md.
 		if bits > (1 << (sz - 1)) {
-			// or something
-			bits = bits - (1 << sz)
+			bits -= 1 << sz
 		}
 		dod = int64(bits)
 	}
 
 	it.tDelta = uint64(int64(it.tDelta) + dod)
-	it.t = it.t + int64(it.tDelta)
+	it.t += int64(it.tDelta)
 
 	return it.readValue()
 }
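// The sign recovery above reads an sz-bit two's-complement value out of a
// uint64: the unsigned subtraction intentionally wraps around, and the final
// int64 conversion yields the negative value. A standalone sketch:
func decodeSigned(raw uint64, sz uint8) int64 {
	if raw > 1<<(sz-1) {
		raw -= 1 << sz // wraps; e.g. for sz=14, raw=8193 ends up as -8191
	}
	return int64(raw)
}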
 
-func (it *xorIterator) readValue() bool {
-	bit, err := it.br.readBitFast()
+func (it *xorIterator) readValue() ValueType {
+	err := xorRead(&it.br, &it.val, &it.leading, &it.trailing)
+	if err != nil {
+		it.err = err
+		return ValNone
+	}
+	it.numRead++
+	return ValFloat
+}
+
+func xorWrite(b *bstream, newValue, currentValue float64, leading, trailing *uint8) {
+	delta := math.Float64bits(newValue) ^ math.Float64bits(currentValue)
+
+	if delta == 0 {
+		b.writeBit(zero)
+		return
+	}
+	b.writeBit(one)
+
+	newLeading := uint8(bits.LeadingZeros64(delta))
+	newTrailing := uint8(bits.TrailingZeros64(delta))
+
+	// Clamp number of leading zeros to avoid overflow when encoding.
+	if newLeading >= 32 {
+		newLeading = 31
+	}
+
+	if *leading != 0xff && newLeading >= *leading && newTrailing >= *trailing {
+		// In this case, we stick with the current leading/trailing.
+		b.writeBit(zero)
+		b.writeBits(delta>>*trailing, 64-int(*leading)-int(*trailing))
+		return
+	}
+
+	// Update leading/trailing for the caller.
+	*leading, *trailing = newLeading, newTrailing
+
+	b.writeBit(one)
+	b.writeBits(uint64(newLeading), 5)
+
+	// Note that if newLeading == newTrailing == 0, then sigbits == 64. But
+	// that value doesn't actually fit into the 6 bits we have.  Luckily, we
+	// never need to encode 0 significant bits, since that would put us in
+	// the other case (vdelta == 0).  So instead we write out a 0 and adjust
+	// it back to 64 on unpacking.
+	sigbits := 64 - newLeading - newTrailing
+	b.writeBits(uint64(sigbits), 6)
+	b.writeBits(delta>>newTrailing, int(sigbits))
+}
+
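// A self-contained sketch (standard library only) of the control values that
// xorWrite derives from two adjacent samples; the helper name is illustrative
// and not part of this package:
func xorWindow(prev, cur float64) (leading, trailing, sigbits uint8, delta uint64) {
	delta = math.Float64bits(cur) ^ math.Float64bits(prev)
	if delta == 0 {
		return 0, 0, 0, 0 // the encoder emits a single '0' bit
	}
	leading = uint8(bits.LeadingZeros64(delta))
	if leading >= 32 {
		leading = 31 // leading is stored in a 5-bit field, so clamp to 31
	}
	trailing = uint8(bits.TrailingZeros64(delta))
	sigbits = 64 - leading - trailing // written as 0 when it would be 64
	return leading, trailing, sigbits, delta
}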
+func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error {
+	bit, err := br.readBitFast()
 	if err != nil {
-		bit, err = it.br.readBit()
+		bit, err = br.readBit()
 	}
 	if err != nil {
-		it.err = err
-		return false
+		return err
+	}
+	if bit == zero {
+		return nil
+	}
+	bit, err = br.readBitFast()
+	if err != nil {
+		bit, err = br.readBit()
+	}
+	if err != nil {
+		return err
 	}
 
+	var (
+		bits                           uint64
+		newLeading, newTrailing, mbits uint8
+	)
+
 	if bit == zero {
-		// it.val = it.val
+		// Reuse leading/trailing zero bits.
+		newLeading, newTrailing = *leading, *trailing
+		mbits = 64 - newLeading - newTrailing
 	} else {
-		bit, err := it.br.readBitFast()
+		bits, err = br.readBitsFast(5)
 		if err != nil {
-			bit, err = it.br.readBit()
+			bits, err = br.readBits(5)
 		}
 		if err != nil {
-			it.err = err
-			return false
-		}
-		if bit == zero {
-			// reuse leading/trailing zero bits
-			// it.leading, it.trailing = it.leading, it.trailing
-		} else {
-			bits, err := it.br.readBitsFast(5)
-			if err != nil {
-				bits, err = it.br.readBits(5)
-			}
-			if err != nil {
-				it.err = err
-				return false
-			}
-			it.leading = uint8(bits)
-
-			bits, err = it.br.readBitsFast(6)
-			if err != nil {
-				bits, err = it.br.readBits(6)
-			}
-			if err != nil {
-				it.err = err
-				return false
-			}
-			mbits := uint8(bits)
-			// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder
-			if mbits == 0 {
-				mbits = 64
-			}
-			it.trailing = 64 - it.leading - mbits
+			return err
 		}
+		newLeading = uint8(bits)
 
-		mbits := 64 - it.leading - it.trailing
-		bits, err := it.br.readBitsFast(mbits)
+		bits, err = br.readBitsFast(6)
 		if err != nil {
-			bits, err = it.br.readBits(mbits)
+			bits, err = br.readBits(6)
 		}
 		if err != nil {
-			it.err = err
-			return false
+			return err
+		}
+		mbits = uint8(bits)
+		// 0 significant bits here means we overflowed and we actually
+		// need 64; see comment in xorWrite.
+		if mbits == 0 {
+			mbits = 64
 		}
-		vbits := math.Float64bits(it.val)
-		vbits ^= bits << it.trailing
-		it.val = math.Float64frombits(vbits)
+		newTrailing = 64 - newLeading - mbits
+		// Update leading/trailing zero bits for the caller.
+		*leading, *trailing = newLeading, newTrailing
 	}
-
-	it.numRead++
-	return true
+	bits, err = br.readBitsFast(mbits)
+	if err != nil {
+		bits, err = br.readBits(mbits)
+	}
+	if err != nil {
+		return err
+	}
+	vbits := math.Float64bits(*value)
+	vbits ^= bits << newTrailing
+	*value = math.Float64frombits(vbits)
+	return nil
 }
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba9730d93692a15bb3fda500f5d912eb9868b1f1
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go
@@ -0,0 +1,264 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunks
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+)
+
+const (
+	// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again.
+	chunkRefMapShrinkThreshold = 1000
+
+	// Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.
+	chunkRefMapMinShrinkInterval = 10 * time.Minute
+
+	// Maximum size of segment used by job queue (number of elements). With chunkWriteJob being 64 bytes,
+	// this will use ~512 KiB for an empty queue.
+	maxChunkQueueSegmentSize = 8192
+)
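// Back-of-the-envelope check for the numbers above, assuming the ~64-byte
// chunkWriteJob the comment mentions:
//
//	8192 jobs/segment * 64 B/job = 524288 B = 512 KiB per empty-queue segment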
+
+type chunkWriteJob struct {
+	cutFile   bool
+	seriesRef HeadSeriesRef
+	mint      int64
+	maxt      int64
+	chk       chunkenc.Chunk
+	ref       ChunkDiskMapperRef
+	isOOO     bool
+	callback  func(error)
+}
+
+// chunkWriteQueue is a queue for writing chunks to disk in a non-blocking fashion.
+// Chunks that shall be written get added to the queue, which is consumed asynchronously.
+// Adding jobs to the queue is non-blocking as long as the queue isn't full.
+type chunkWriteQueue struct {
+	jobs *writeJobQueue
+
+	chunkRefMapMtx        sync.RWMutex
+	chunkRefMap           map[ChunkDiskMapperRef]chunkenc.Chunk
+	chunkRefMapPeakSize   int       // Largest size that chunkRefMap has grown to since the last time we shrank it.
+	chunkRefMapLastShrink time.Time // When the chunkRefMap was last shrunk.
+
+	// isRunningMtx serves two purposes:
+	// 1. It protects the isRunning field.
+	// 2. It serializes adding of jobs to the chunkRefMap in the addJob() method. If the job queue is full then addJob() will block
+	// while holding this mutex, which guarantees that the chunkRefMap won't ever grow beyond the queue size + 1.
+	isRunningMtx sync.Mutex
+	isRunning    bool // Used to prevent new jobs from being added to the queue after it has been closed.
+
+	workerWg sync.WaitGroup
+
+	writeChunk writeChunkF
+
+	// Keeping separate counters instead of only a single CounterVec to improve the performance of the critical
+	// addJob() method which otherwise would need to perform a WithLabelValues call on the CounterVec.
+	adds      prometheus.Counter
+	gets      prometheus.Counter
+	completed prometheus.Counter
+	shrink    prometheus.Counter
+}
+
+// writeChunkF is a function which writes chunks; it is dynamic to allow mocking in tests.
+type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error
+
+func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChunkF) *chunkWriteQueue {
+	counters := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "prometheus_tsdb_chunk_write_queue_operations_total",
+			Help: "Number of operations on the chunk_write_queue.",
+		},
+		[]string{"operation"},
+	)
+
+	segmentSize := size
+	if segmentSize > maxChunkQueueSegmentSize {
+		segmentSize = maxChunkQueueSegmentSize
+	}
+
+	q := &chunkWriteQueue{
+		jobs:                  newWriteJobQueue(size, segmentSize),
+		chunkRefMap:           make(map[ChunkDiskMapperRef]chunkenc.Chunk),
+		chunkRefMapLastShrink: time.Now(),
+		writeChunk:            writeChunk,
+
+		adds:      counters.WithLabelValues("add"),
+		gets:      counters.WithLabelValues("get"),
+		completed: counters.WithLabelValues("complete"),
+		shrink:    counters.WithLabelValues("shrink"),
+	}
+
+	if reg != nil {
+		reg.MustRegister(counters)
+	}
+
+	q.start()
+	return q
+}
+
+func (c *chunkWriteQueue) start() {
+	c.workerWg.Add(1)
+	go func() {
+		defer c.workerWg.Done()
+
+		for {
+			job, ok := c.jobs.pop()
+			if !ok {
+				return
+			}
+
+			c.processJob(job)
+		}
+	}()
+
+	c.isRunningMtx.Lock()
+	c.isRunning = true
+	c.isRunningMtx.Unlock()
+}
+
+func (c *chunkWriteQueue) processJob(job chunkWriteJob) {
+	err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.isOOO, job.cutFile)
+	if job.callback != nil {
+		job.callback(err)
+	}
+
+	c.chunkRefMapMtx.Lock()
+	defer c.chunkRefMapMtx.Unlock()
+
+	delete(c.chunkRefMap, job.ref)
+
+	c.completed.Inc()
+
+	c.shrinkChunkRefMap()
+}
+
+// shrinkChunkRefMap checks whether the conditions to shrink the chunkRefMap are met;
+// if so, the chunkRefMap is reinitialized. The chunkRefMapMtx must be held when calling this method.
+//
+// We do this because the Go runtime doesn't release the internal memory used by a map after the map has been emptied.
+// To achieve that, we create a new map and throw the old one away.
+func (c *chunkWriteQueue) shrinkChunkRefMap() {
+	if len(c.chunkRefMap) > 0 {
+		// Can't shrink it while there is data in it.
+		return
+	}
+
+	if c.chunkRefMapPeakSize < chunkRefMapShrinkThreshold {
+		// Not shrinking it because it has not grown to the minimum threshold yet.
+		return
+	}
+
+	now := time.Now()
+
+	if now.Sub(c.chunkRefMapLastShrink) < chunkRefMapMinShrinkInterval {
+		// Not shrinking it because the minimum duration between shrink-events has not passed yet.
+		return
+	}
+
+	// Re-initialize the chunk ref map to half of the peak size that it has grown to since the last re-init event.
+	// We are trying to hit the sweet spot in the trade-off between initializing it to a very small size
+	// potentially resulting in many allocations to re-grow it, and initializing it to a large size potentially
+	// resulting in unused allocated memory.
+	c.chunkRefMap = make(map[ChunkDiskMapperRef]chunkenc.Chunk, c.chunkRefMapPeakSize/2)
+
+	c.chunkRefMapPeakSize = 0
+	c.chunkRefMapLastShrink = now
+	c.shrink.Inc()
+}
+
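// The shrink heuristic restated as one predicate (a sketch; the method above
// additionally resets the peak and timestamp when it fires): the map must be
// empty, must have peaked at or above the threshold, and the minimum interval
// must have passed since the last shrink. E.g. a map that peaked at 6000
// entries is reallocated with capacity 6000/2 = 3000.
func shouldShrink(mapLen, peak int, lastShrink, now time.Time) bool {
	return mapLen == 0 &&
		peak >= chunkRefMapShrinkThreshold &&
		now.Sub(lastShrink) >= chunkRefMapMinShrinkInterval
}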
+func (c *chunkWriteQueue) addJob(job chunkWriteJob) (err error) {
+	defer func() {
+		if err == nil {
+			c.adds.Inc()
+		}
+	}()
+
+	c.isRunningMtx.Lock()
+	defer c.isRunningMtx.Unlock()
+
+	if !c.isRunning {
+		return errors.New("queue is not running")
+	}
+
+	c.chunkRefMapMtx.Lock()
+	c.chunkRefMap[job.ref] = job.chk
+
+	// Keep track of the peak usage of c.chunkRefMap.
+	if len(c.chunkRefMap) > c.chunkRefMapPeakSize {
+		c.chunkRefMapPeakSize = len(c.chunkRefMap)
+	}
+	c.chunkRefMapMtx.Unlock()
+
+	if ok := c.jobs.push(job); !ok {
+		c.chunkRefMapMtx.Lock()
+		delete(c.chunkRefMap, job.ref)
+		c.chunkRefMapMtx.Unlock()
+
+		return errors.New("queue is closed")
+	}
+
+	return nil
+}
+
+func (c *chunkWriteQueue) get(ref ChunkDiskMapperRef) chunkenc.Chunk {
+	c.chunkRefMapMtx.RLock()
+	defer c.chunkRefMapMtx.RUnlock()
+
+	chk, ok := c.chunkRefMap[ref]
+	if ok {
+		c.gets.Inc()
+	}
+
+	return chk
+}
+
+func (c *chunkWriteQueue) stop() {
+	c.isRunningMtx.Lock()
+	defer c.isRunningMtx.Unlock()
+
+	if !c.isRunning {
+		return
+	}
+
+	c.isRunning = false
+
+	c.jobs.close()
+
+	c.workerWg.Wait()
+}
+
+func (c *chunkWriteQueue) queueIsEmpty() bool {
+	return c.queueSize() == 0
+}
+
+func (c *chunkWriteQueue) queueIsFull() bool {
+	// When the queue is full and blocked on the writer, the chunkRefMap holds one more job than the queue's maxSize,
+	// because one job is currently being processed and blocked in the writer.
+	return c.queueSize() == c.jobs.maxSize+1
+}
+
+func (c *chunkWriteQueue) queueSize() int {
+	c.chunkRefMapMtx.Lock()
+	defer c.chunkRefMapMtx.Unlock()
+
+	// Looking at chunkRefMap instead of the job queue because a job is popped from the queue before it has
+	// been fully processed; it remains in the chunkRefMap until the processing is complete.
+	return len(c.chunkRefMap)
+}
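// Taken together, a hedged usage sketch of this queue (writeChunk stands for
// any writeChunkF implementation; ref/chk are a previously reserved
// ChunkDiskMapperRef and its chunk; error handling elided):
//
//	q := newChunkWriteQueue(nil, 1000, writeChunk) // nil Registerer: metrics stay unregistered
//	_ = q.addJob(chunkWriteJob{ref: ref, chk: chk, callback: func(err error) {
//		// runs on the worker goroutine once the chunk is written (or failed)
//	}})
//	pending := q.get(ref) // served from chunkRefMap until the write completes
//	q.stop()              // closes the queue and waits for the worker to drain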
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go
index af781e2597044fd8aa6a6346c3db1a720b75dab2..f505d762bb2f6b11798a68b4fb4913296a0befeb 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go
@@ -15,19 +15,16 @@ package chunks
 
 import (
 	"bufio"
-	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"hash"
 	"hash/crc32"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"
 
-	"github.com/pkg/errors"
-
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
@@ -54,13 +51,84 @@ const (
 	ChunkEncodingSize = 1
 )
 
-// Meta holds information about a chunk of data.
+// ChunkRef is a generic reference for reading chunk data. In Prometheus it
+// is either a HeadChunkRef or BlockChunkRef, though other implementations
+// may have their own reference types.
+type ChunkRef uint64
+
+// HeadSeriesRef refers to in-memory series.
+type HeadSeriesRef uint64
+
+// HeadChunkRef packs a HeadSeriesRef and a ChunkID into a global 8-byte ID.
+// The HeadSeriesRef and ChunkID may not exceed 5 and 3 bytes respectively.
+type HeadChunkRef uint64
+
+func NewHeadChunkRef(hsr HeadSeriesRef, chunkID HeadChunkID) HeadChunkRef {
+	if hsr > (1<<40)-1 {
+		panic("series ID exceeds 5 bytes")
+	}
+	if chunkID > (1<<24)-1 {
+		panic("chunk ID exceeds 3 bytes")
+	}
+	return HeadChunkRef(uint64(hsr<<24) | uint64(chunkID))
+}
+
+func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
+	return HeadSeriesRef(p >> 24), HeadChunkID(p<<40) >> 40
+}
+
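// The 5-byte/3-byte split in numbers: a series ref of 0xABCDE and a chunk ID
// of 0x12 pack into 0xABCDE<<24 | 0x12 = 0xABCDE000012, and Unpack reverses
// the operation:
//
//	ref := NewHeadChunkRef(HeadSeriesRef(0xABCDE), HeadChunkID(0x12))
//	hsr, cid := ref.Unpack() // hsr == 0xABCDE, cid == 0x12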
+// HeadChunkID refers to a specific chunk in a series (memSeries) in the Head.
+// Each memSeries has its own monotonically increasing number to refer to its chunks.
+// If the HeadChunkID value is...
+//   - memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk.
+//   - less than the above, but >= memSeries.firstChunkID, then it's
+//     memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstChunkID.
+//
+// If memSeries.headChunks is non-nil it points to a *memChunk that holds the current
+// "open" (accepting appends) instance. *memChunk is a linked list and memChunk.next pointer
+// might link to the older *memChunk instance.
+// If there are multiple *memChunk instances linked to each other from memSeries.headChunks
+// they will be m-mapped as soon as possible leaving only "open" *memChunk instance.
+//
+// Example:
+// assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
+//
+//	| HeadChunkID value | refers to ...                                                                          |
+//	|-------------------|----------------------------------------------------------------------------------------|
+//	|               0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head |
+//	|              7-11 | memSeries.mmappedChunks[i] where i is 0 to 4.                                          |
+//	|                12 |                                                         *memChunk{next: nil}
+//	|                13 |                                         *memChunk{next: ^}
+//	|                14 | memSeries.headChunks -> *memChunk{next: ^}
+type HeadChunkID uint64
+
+// BlockChunkRef refers to a chunk within a persisted block.
+// The upper 4 bytes are for the segment index and
+// the lower 4 bytes are for the segment offset where the data starts for this chunk.
+type BlockChunkRef uint64
+
+// NewBlockChunkRef packs the file index and byte offset into a BlockChunkRef.
+func NewBlockChunkRef(fileIndex, fileOffset uint64) BlockChunkRef {
+	return BlockChunkRef(fileIndex<<32 | fileOffset)
+}
+
+func (b BlockChunkRef) Unpack() (int, int) {
+	sgmIndex := int(b >> 32)
+	chkStart := int((b << 32) >> 32)
+	return sgmIndex, chkStart
+}
+
+// Meta holds information about one or more chunks.
+// For examples of when chunks.Meta could refer to multiple chunks, see
+// ChunkReader.ChunkOrIterable().
 type Meta struct {
 	// Ref and Chunk hold either a reference that can be used to retrieve
 	// chunk data or the data itself.
-	// When it is a reference it is the segment offset at which the chunk bytes start.
-	// Generally, only one of them is set.
-	Ref   uint64
+	// If Chunk is nil, call ChunkReader.ChunkOrIterable(Meta.Ref) to get the
+	// chunk and assign it to the Chunk field. If an iterable is returned from
+	// that method, then it may not be possible to set Chunk as the iterable
+	// might form several chunks.
+	Ref   ChunkRef
 	Chunk chunkenc.Chunk
 
 	// Time range the data covers.
@@ -68,10 +136,96 @@ type Meta struct {
 	MinTime, MaxTime int64
 }
 
-// Iterator iterates over the chunk of a time series.
+// ChunkFromSamples requires all samples to have the same type.
+func ChunkFromSamples(s []Sample) (Meta, error) {
+	return ChunkFromSamplesGeneric(SampleSlice(s))
+}
+
+// ChunkFromSamplesGeneric requires all samples to have the same type.
+func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
+	emptyChunk := Meta{Chunk: chunkenc.NewXORChunk()}
+	mint, maxt := int64(0), int64(0)
+
+	if s.Len() > 0 {
+		mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T()
+	}
+
+	if s.Len() == 0 {
+		return emptyChunk, nil
+	}
+
+	sampleType := s.Get(0).Type()
+	c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding())
+	if err != nil {
+		return Meta{}, err
+	}
+
+	ca, _ := c.Appender()
+	var newChunk chunkenc.Chunk
+
+	for i := 0; i < s.Len(); i++ {
+		switch sampleType {
+		case chunkenc.ValFloat:
+			ca.Append(s.Get(i).T(), s.Get(i).F())
+		case chunkenc.ValHistogram:
+			newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false)
+			if err != nil {
+				return emptyChunk, err
+			}
+			if newChunk != nil {
+				return emptyChunk, errors.New("did not expect to start a second chunk")
+			}
+		case chunkenc.ValFloatHistogram:
+			newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
+			if err != nil {
+				return emptyChunk, err
+			}
+			if newChunk != nil {
+				return emptyChunk, errors.New("did not expect to start a second chunk")
+			}
+		default:
+			panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
+		}
+	}
+	return Meta{
+		MinTime: mint,
+		MaxTime: maxt,
+		Chunk:   c,
+	}, nil
+}
+
+// ChunkMetasToSamples converts a slice of chunk meta data to a slice of samples.
+// Used in tests to compare the content of chunks.
+func ChunkMetasToSamples(chunks []Meta) (result []Sample) {
+	if len(chunks) == 0 {
+		return
+	}
+
+	for _, chunk := range chunks {
+		it := chunk.Chunk.Iterator(nil)
+		for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
+			switch vt {
+			case chunkenc.ValFloat:
+				t, v := it.At()
+				result = append(result, sample{t: t, f: v})
+			case chunkenc.ValHistogram:
+				t, h := it.AtHistogram(nil)
+				result = append(result, sample{t: t, h: h})
+			case chunkenc.ValFloatHistogram:
+				t, fh := it.AtFloatHistogram(nil)
+				result = append(result, sample{t: t, fh: fh})
+			default:
+				panic("unexpected value type")
+			}
+		}
+	}
+	return
+}
+
+// Iterator iterates over the chunks of a single time series.
 type Iterator interface {
 	// At returns the current meta.
-	// It depends on implementation if the chunk is populated or not.
+	// It depends on the implementation whether the chunk is populated or not.
 	At() Meta
 	// Next advances the iterator by one.
 	Next() bool
@@ -97,9 +251,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
 	return cm.MinTime <= maxt && mint <= cm.MaxTime
 }
 
-var (
-	errInvalidSize = fmt.Errorf("invalid size")
-)
+var errInvalidSize = errors.New("invalid size")
 
 var castagnoliTable *crc32.Table
 
@@ -113,6 +265,17 @@ func newCRC32() hash.Hash32 {
 	return crc32.New(castagnoliTable)
 }
 
+// Check if the CRC of data matches that stored in sum, computed when the chunk was stored.
+func checkCRC32(data, sum []byte) error {
+	got := crc32.Checksum(data, castagnoliTable)
+	// This combination of shifts is the inverse of digest.Sum() in go/src/hash/crc32.
+	want := uint32(sum[0])<<24 + uint32(sum[1])<<16 + uint32(sum[2])<<8 + uint32(sum[3])
+	if got != want {
+		return fmt.Errorf("checksum mismatch expected:%x, actual:%x", want, got)
+	}
+	return nil
+}
+
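// A round-trip sketch for checkCRC32 (standard library only): the stored sum
// is simply the big-endian serialization of the Castagnoli CRC.
//
//	data := []byte("chunk-bytes")
//	sum := make([]byte, 4)
//	binary.BigEndian.PutUint32(sum, crc32.Checksum(data, castagnoliTable))
//	err := checkCRC32(data, sum) // err == nil: checksum matches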
 // Writer implements the ChunkWriter interface for the standard
 // serialization format.
 type Writer struct {
@@ -148,7 +311,7 @@ func newWriter(dir string, segmentSize int64) (*Writer, error) {
 		segmentSize = DefaultChunkSegmentSize
 	}
 
-	if err := os.MkdirAll(dir, 0777); err != nil {
+	if err := os.MkdirAll(dir, 0o777); err != nil {
 		return nil, err
 	}
 	dirFile, err := fileutil.OpenDir(dir)
@@ -221,32 +384,31 @@ func (w *Writer) cut() error {
 func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) {
 	p, seq, err := nextSequenceFile(dirFile.Name())
 	if err != nil {
-		return 0, nil, 0, errors.Wrap(err, "next sequence file")
+		return 0, nil, 0, fmt.Errorf("next sequence file: %w", err)
 	}
 	ptmp := p + ".tmp"
-	f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0666)
+	f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666)
 	if err != nil {
-		return 0, nil, 0, errors.Wrap(err, "open temp file")
+		return 0, nil, 0, fmt.Errorf("open temp file: %w", err)
 	}
 	defer func() {
 		if returnErr != nil {
-			var merr tsdb_errors.MultiError
-			merr.Add(returnErr)
+			errs := tsdb_errors.NewMulti(returnErr)
 			if f != nil {
-				merr.Add(f.Close())
+				errs.Add(f.Close())
 			}
 			// Calling RemoveAll on a non-existent file does not return an error.
-			merr.Add(os.RemoveAll(ptmp))
-			returnErr = merr.Err()
+			errs.Add(os.RemoveAll(ptmp))
+			returnErr = errs.Err()
 		}
 	}()
 	if allocSize > 0 {
 		if err = fileutil.Preallocate(f, allocSize, true); err != nil {
-			return 0, nil, 0, errors.Wrap(err, "preallocate")
+			return 0, nil, 0, fmt.Errorf("preallocate: %w", err)
 		}
 	}
 	if err = dirFile.Sync(); err != nil {
-		return 0, nil, 0, errors.Wrap(err, "sync directory")
+		return 0, nil, 0, fmt.Errorf("sync directory: %w", err)
 	}
 
 	// Write header metadata for new file.
@@ -256,24 +418,24 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all
 
 	n, err := f.Write(metab)
 	if err != nil {
-		return 0, nil, 0, errors.Wrap(err, "write header")
+		return 0, nil, 0, fmt.Errorf("write header: %w", err)
 	}
 	if err := f.Close(); err != nil {
-		return 0, nil, 0, errors.Wrap(err, "close temp file")
+		return 0, nil, 0, fmt.Errorf("close temp file: %w", err)
 	}
 	f = nil
 
 	if err := fileutil.Rename(ptmp, p); err != nil {
-		return 0, nil, 0, errors.Wrap(err, "replace file")
+		return 0, nil, 0, fmt.Errorf("replace file: %w", err)
 	}
 
-	f, err = os.OpenFile(p, os.O_WRONLY, 0666)
+	f, err = os.OpenFile(p, os.O_WRONLY, 0o666)
 	if err != nil {
-		return 0, nil, 0, errors.Wrap(err, "open final file")
+		return 0, nil, 0, fmt.Errorf("open final file: %w", err)
 	}
 	// Skip header for further writes.
 	if _, err := f.Seek(int64(n), 0); err != nil {
-		return 0, nil, 0, errors.Wrap(err, "seek in final file")
+		return 0, nil, 0, fmt.Errorf("seek in final file: %w", err)
 	}
 	return n, f, seq, nil
 }
@@ -308,7 +470,7 @@ func (w *Writer) WriteChunks(chks ...Meta) error {
 		// the batch is too large to fit in the current segment.
 		cutNewBatch := (i != 0) && (batchSize+SegmentHeaderSize > w.segmentSize)
 
-		// When the segment already has some data than
+		// If the segment already has some data then
 		// the first batch size calculation should account for that.
 		if firstBatch && w.n > SegmentHeaderSize {
 			cutNewBatch = batchSize+w.n > w.segmentSize
@@ -356,16 +518,11 @@ func (w *Writer) writeChunks(chks []Meta) error {
 		return nil
 	}
 
-	var seq = uint64(w.seq()) << 32
+	seq := uint64(w.seq())
 	for i := range chks {
 		chk := &chks[i]
 
-		// The reference is set to the segment index and the offset where
-		// the data starts for this chunk.
-		//
-		// The upper 4 bytes are for the segment index and
-		// the lower 4 bytes are for the segment offset where to start reading this chunk.
-		chk.Ref = seq | uint64(w.n)
+		chk.Ref = ChunkRef(NewBlockChunkRef(seq, uint64(w.n)))
 
 		n := binary.PutUvarint(w.buf[:], uint64(len(chk.Chunk.Bytes())))
 
@@ -435,16 +592,16 @@ func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, err
 	cr := Reader{pool: pool, bs: bs, cs: cs}
 	for i, b := range cr.bs {
 		if b.Len() < SegmentHeaderSize {
-			return nil, errors.Wrapf(errInvalidSize, "invalid segment header in segment %d", i)
+			return nil, fmt.Errorf("invalid segment header in segment %d: %w", i, errInvalidSize)
 		}
 		// Verify magic number.
 		if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks {
-			return nil, errors.Errorf("invalid magic number %x", m)
+			return nil, fmt.Errorf("invalid magic number %x", m)
 		}
 
 		// Verify chunk format version.
 		if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
-			return nil, errors.Errorf("invalid chunk format version %d", v)
+			return nil, fmt.Errorf("invalid chunk format version %d", v)
 		}
 		cr.size += int64(b.Len())
 	}
@@ -463,16 +620,16 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
 	}
 
 	var (
-		bs   []ByteSlice
-		cs   []io.Closer
-		merr tsdb_errors.MultiError
+		bs []ByteSlice
+		cs []io.Closer
 	)
 	for _, fn := range files {
 		f, err := fileutil.OpenMmapFile(fn)
 		if err != nil {
-			merr.Add(errors.Wrap(err, "mmap files"))
-			merr.Add(closeAll(cs))
-			return nil, merr
+			return nil, tsdb_errors.NewMulti(
+				fmt.Errorf("mmap files: %w", err),
+				tsdb_errors.CloseAll(cs),
+			).Err()
 		}
 		cs = append(cs, f)
 		bs = append(bs, realByteSlice(f.Bytes()))
@@ -480,15 +637,16 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
 
 	reader, err := newReader(bs, cs, pool)
 	if err != nil {
-		merr.Add(err)
-		merr.Add(closeAll(cs))
-		return nil, merr
+		return nil, tsdb_errors.NewMulti(
+			err,
+			tsdb_errors.CloseAll(cs),
+		).Err()
 	}
 	return reader, nil
 }
 
 func (s *Reader) Close() error {
-	return closeAll(s.cs)
+	return tsdb_errors.CloseAll(s.cs)
 }
 
 // Size returns the size of the chunks.
@@ -496,33 +654,25 @@ func (s *Reader) Size() int64 {
 	return s.size
 }
 
-// Chunk returns a chunk from a given reference.
-func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
-	var (
-		// Get the upper 4 bytes.
-		// These contain the segment index.
-		sgmIndex = int(ref >> 32)
-		// Get the lower 4 bytes.
-		// These contain the segment offset where the data for this chunk starts.
-		chkStart = int((ref << 32) >> 32)
-		chkCRC32 = newCRC32()
-	)
+// ChunkOrIterable returns a chunk from a given reference.
+func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
+	sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack()
 
 	if sgmIndex >= len(s.bs) {
-		return nil, errors.Errorf("segment index %d out of range", sgmIndex)
+		return nil, nil, fmt.Errorf("segment index %d out of range", sgmIndex)
 	}
 
 	sgmBytes := s.bs[sgmIndex]
 
 	if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() {
-		return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
+		return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
 	}
 	// With the minimum chunk length this should never cause us to read
 	// over the end of the slice.
 	c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize)
 	chkDataLen, n := binary.Uvarint(c)
 	if n <= 0 {
-		return nil, errors.Errorf("reading chunk length failed with %d", n)
+		return nil, nil, fmt.Errorf("reading chunk length failed with %d", n)
 	}
 
 	chkEncStart := chkStart + n
@@ -531,25 +681,22 @@ func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
 	chkDataEnd := chkEnd - crc32.Size
 
 	if chkEnd > sgmBytes.Len() {
-		return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
+		return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
 	}
 
 	sum := sgmBytes.Range(chkDataEnd, chkEnd)
-	if _, err := chkCRC32.Write(sgmBytes.Range(chkEncStart, chkDataEnd)); err != nil {
-		return nil, err
-	}
-
-	if act := chkCRC32.Sum(nil); !bytes.Equal(act, sum) {
-		return nil, errors.Errorf("checksum mismatch expected:%x, actual:%x", sum, act)
+	if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil {
+		return nil, nil, err
 	}
 
 	chkData := sgmBytes.Range(chkDataStart, chkDataEnd)
 	chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0]
-	return s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
+	chk, err := s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
+	return chk, nil, err
 }
 
 func nextSequenceFile(dir string) (string, int, error) {
-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return "", 0, err
 	}
@@ -562,7 +709,7 @@ func nextSequenceFile(dir string) (string, int, error) {
 		}
 		// It is not necessary that we find the files in number order,
 		// for example with '1000000' and '200000', '1000000' would come first.
-		// Though this is a very very race case, we check anyway for the max id.
+		// Though this is a very very rare case, we check anyway for the max id.
 		if j > i {
 			i = j
 		}
@@ -575,7 +722,7 @@ func segmentFile(baseDir string, index int) string {
 }
 
 func sequenceFiles(dir string) ([]string, error) {
-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return nil, err
 	}
@@ -588,12 +735,3 @@ func sequenceFiles(dir string) ([]string, error) {
 	}
 	return res, nil
 }
-
-func closeAll(cs []io.Closer) error {
-	var merr tsdb_errors.MultiError
-
-	for _, c := range cs {
-		merr.Add(c.Close())
-	}
-	return merr.Err()
-}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
index 6f67b8318ac2087fa35fad6fcf0f41389c80c152..876b42cb26a3c56c110593efe41b5dd9c4aefa18 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
@@ -15,18 +15,19 @@ package chunks
 
 import (
 	"bufio"
-	"bytes"
 	"encoding/binary"
+	"errors"
+	"fmt"
 	"hash"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
-	"sort"
+	"slices"
 	"strconv"
 	"sync"
 
-	"github.com/pkg/errors"
+	"github.com/dennwc/varint"
+	"github.com/prometheus/client_golang/prometheus"
 	"go.uber.org/atomic"
 
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -40,21 +41,18 @@ const (
 	MagicHeadChunks = 0x0130BC91
 
 	headChunksFormatV1 = 1
-	writeBufferSize    = 4 * 1024 * 1024 // 4 MiB.
 )
 
-var (
-	// ErrChunkDiskMapperClosed returned by any method indicates
-	// that the ChunkDiskMapper was closed.
-	ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed")
-)
+// ErrChunkDiskMapperClosed returned by any method indicates
+// that the ChunkDiskMapper was closed.
+var ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed")
 
 const (
 	// MintMaxtSize is the size of the mint/maxt for head chunk file and chunks.
 	MintMaxtSize = 8
 	// SeriesRefSize is the size of series reference on disk.
 	SeriesRefSize = 8
-	// HeadChunkFileHeaderSize is the total size of the header for the head chunk file.
+	// HeadChunkFileHeaderSize is the total size of the header for a head chunk file.
 	HeadChunkFileHeaderSize = SegmentHeaderSize
 	// MaxHeadChunkFileSize is the max size of a head chunk file.
 	MaxHeadChunkFileSize = 128 * 1024 * 1024 // 128 MiB.
@@ -62,10 +60,46 @@ const (
 	CRCSize = 4
 	// MaxHeadChunkMetaSize is the max size of an mmapped chunks minus the chunks data.
 	// Max because the uvarint size can be smaller.
-	MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunksFormatVersionSize + MaxChunkLengthFieldSize + CRCSize
+	MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunkEncodingSize + MaxChunkLengthFieldSize + CRCSize
+	// MinWriteBufferSize is the minimum write buffer size allowed.
+	MinWriteBufferSize = 64 * 1024 // 64KB.
+	// MaxWriteBufferSize is the maximum write buffer size allowed.
+	MaxWriteBufferSize = 8 * 1024 * 1024 // 8 MiB.
+	// DefaultWriteBufferSize is the default write buffer size.
+	DefaultWriteBufferSize = 4 * 1024 * 1024 // 4 MiB.
+	// DefaultWriteQueueSize is the default size of the in-memory queue used before flushing chunks to the disk.
+	// A value of 0 completely disables this feature.
+	DefaultWriteQueueSize = 0
 )
 
-// corruptionErr is an error that's returned when corruption is encountered.
+// ChunkDiskMapperRef represents the location of a head chunk on disk.
+// The upper 4 bytes hold the index of the head chunk file and
+// the lower 4 bytes hold the byte offset in the head chunk file where the chunk starts.
+type ChunkDiskMapperRef uint64
+
+func newChunkDiskMapperRef(seq, offset uint64) ChunkDiskMapperRef {
+	return ChunkDiskMapperRef((seq << 32) | offset)
+}
+
+func (ref ChunkDiskMapperRef) Unpack() (seq, offset int) {
+	seq = int(ref >> 32)
+	offset = int((ref << 32) >> 32)
+	return seq, offset
+}
+
+func (ref ChunkDiskMapperRef) GreaterThanOrEqualTo(r ChunkDiskMapperRef) bool {
+	s1, o1 := ref.Unpack()
+	s2, o2 := r.Unpack()
+	return s1 > s2 || (s1 == s2 && o1 >= o2)
+}
+
+func (ref ChunkDiskMapperRef) GreaterThan(r ChunkDiskMapperRef) bool {
+	s1, o1 := ref.Unpack()
+	s2, o2 := r.Unpack()
+	return s1 > s2 || (s1 == s2 && o1 > o2)
+}
+
+// CorruptionErr is an error that's returned when corruption is encountered.
 type CorruptionErr struct {
 	Dir       string
 	FileIndex int
@@ -73,27 +107,110 @@ type CorruptionErr struct {
 }
 
 func (e *CorruptionErr) Error() string {
-	return errors.Wrapf(e.Err, "corruption in head chunk file %s", segmentFile(e.Dir, e.FileIndex)).Error()
+	return fmt.Errorf("corruption in head chunk file %s: %w", segmentFile(e.Dir, e.FileIndex), e.Err).Error()
+}
+
+func (e *CorruptionErr) Unwrap() error {
+	return e.Err
+}
+
+// chunkPos keeps track of the position in the head chunk files.
+// chunkPos is not thread-safe; a lock must be used to protect it.
+type chunkPos struct {
+	seq     uint64 // Index of chunk file.
+	offset  uint64 // Offset within chunk file.
+	cutFile bool   // When true then the next chunk will be written to a new file.
+}
+
+// getNextChunkRef takes a chunk and returns the chunk reference which will refer to it once it has been written.
+// getNextChunkRef also decides whether a new file should be cut before writing this chunk, and it returns the decision via the second return value.
+// The order of calling getNextChunkRef must be the order in which chunks are written to the disk.
+func (f *chunkPos) getNextChunkRef(chk chunkenc.Chunk) (chkRef ChunkDiskMapperRef, cutFile bool) {
+	chkLen := uint64(len(chk.Bytes()))
+	bytesToWrite := f.bytesToWriteForChunk(chkLen)
+
+	if f.shouldCutNewFile(bytesToWrite) {
+		f.toNewFile()
+		f.cutFile = false
+		cutFile = true
+	}
+
+	chkOffset := f.offset
+	f.offset += bytesToWrite
+
+	return newChunkDiskMapperRef(f.seq, chkOffset), cutFile
 }
 
-// ChunkDiskMapper is for writing the Head block chunks to the disk
-// and access chunks via mmapped file.
+// toNewFile updates the seq/offset position to point to the beginning of a new chunk file.
+func (f *chunkPos) toNewFile() {
+	f.seq++
+	f.offset = SegmentHeaderSize
+}
+
+// cutFileOnNextChunk ensures that the next chunk will be written to a new file.
+// Not thread safe; a lock must be held when calling this.
+func (f *chunkPos) cutFileOnNextChunk() {
+	f.cutFile = true
+}
+
+// setSeq sets the sequence number of the head chunk file.
+func (f *chunkPos) setSeq(seq uint64) {
+	f.seq = seq
+}
+
+// shouldCutNewFile returns whether a new file should be cut based on the file size.
+// Not thread safe; a lock must be held when calling this.
+func (f *chunkPos) shouldCutNewFile(bytesToWrite uint64) bool {
+	if f.cutFile {
+		return true
+	}
+
+	return f.offset == 0 || // First head chunk file.
+		f.offset+bytesToWrite > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
+}
+
+// bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size,
+// including all meta data before and after the chunk data.
+// Head chunk format: https://github.com/prometheus/prometheus/blob/main/tsdb/docs/format/head_chunks.md#chunk
+func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 {
+	// Headers.
+	bytes := uint64(SeriesRefSize) + 2*MintMaxtSize + ChunkEncodingSize
+
+	// Size of chunk length encoded as uvarint.
+	bytes += uint64(varint.UvarintSize(chkLen))
+
+	// Chunk length.
+	bytes += chkLen
+
+	// crc32.
+	bytes += CRCSize
+
+	return bytes
+}
+
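// Worked example for bytesToWriteForChunk: a 130-byte chunk needs
// 8 (series ref) + 2*8 (mint/maxt) + 1 (encoding) = 25 header bytes, plus
// 2 bytes for the uvarint length (130 >= 128 needs two bytes), plus the
// 130 chunk bytes, plus 4 CRC bytes: 25 + 2 + 130 + 4 = 161 bytes in total.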
+// ChunkDiskMapper is for writing the Head block chunks to disk
+// and accessing chunks via mmapped files.
 type ChunkDiskMapper struct {
-	curFileNumBytes atomic.Int64 // Bytes written in current open file.
+	// Writer.
+	dir             *os.File
+	writeBufferSize int
 
-	/// Writer.
-	dir *os.File
+	curFile         *os.File      // File being written to.
+	curFileSequence int           // Index of current open file being appended to. 0 if no file is active.
+	curFileOffset   atomic.Uint64 // Bytes written in current open file.
+	curFileMaxt     int64         // Used for the size retention.
 
-	curFile         *os.File // File being written to.
-	curFileSequence int      // Index of current open file being appended to.
-	curFileMaxt     int64    // Used for the size retention.
+	// The values in evtlPos represent the file position which will eventually be
+	// reached once the content of the write queue has been fully processed.
+	evtlPosMtx sync.Mutex
+	evtlPos    chunkPos
 
 	byteBuf      [MaxHeadChunkMetaSize]byte // Buffer used to write the header of the chunk.
 	chkWriter    *bufio.Writer              // Writer for the current open file.
 	crc32        hash.Hash
 	writePathMtx sync.Mutex
 
-	/// Reader.
+	// Reader.
 	// The int key in the map is the file number on the disk.
 	mmappedChunkFiles map[int]*mmappedChunkFile // Contains the m-mapped files for each chunk file mapped with its index.
 	closers           map[int]io.Closer         // Closers for resources behind the byte slices.
@@ -105,28 +222,35 @@ type ChunkDiskMapper struct {
 	// from which chunks are served till they are flushed and are ready for m-mapping.
 	chunkBuffer *chunkBuffer
 
-	// The total size of bytes in the closed files.
-	// Needed to calculate the total size of all segments on disk.
-	size atomic.Int64
-
-	// If 'true', it indicated that the maxt of all the on-disk files were set
-	// after iterating through all the chunks in those files.
+	// Whether the maxt field is set for all mmapped chunk files tracked within the mmappedChunkFiles map.
+	// This is done after iterating through all the chunks in those files using the IterateAllChunks method.
 	fileMaxtSet bool
 
+	writeQueue *chunkWriteQueue
+
 	closed bool
 }
 
+// mmappedChunkFile provides mmap access to an entire head chunks file that holds many chunks.
 type mmappedChunkFile struct {
 	byteSlice ByteSlice
-	maxt      int64
+	maxt      int64 // Max timestamp among all of this file's chunks.
 }
 
-// NewChunkDiskMapper returns a new writer against the given directory
+// NewChunkDiskMapper returns a new ChunkDiskMapper against the given directory
 // using the default head chunk file duration.
 // NOTE: 'IterateAllChunks' method needs to be called at least once after creating ChunkDiskMapper
-// to set the maxt of all the file.
-func NewChunkDiskMapper(dir string, pool chunkenc.Pool) (*ChunkDiskMapper, error) {
-	if err := os.MkdirAll(dir, 0777); err != nil {
+// to set the maxt of all files.
+func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Pool, writeBufferSize, writeQueueSize int) (*ChunkDiskMapper, error) {
+	// Validate write buffer size.
+	if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize {
+		return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize)
+	}
+	if writeBufferSize%1024 != 0 {
+		return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize)
+	}
+
+	if err := os.MkdirAll(dir, 0o777); err != nil {
 		return nil, err
 	}
 	dirFile, err := fileutil.OpenDir(dir)
@@ -135,10 +259,15 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool) (*ChunkDiskMapper, error
 	}
 
 	m := &ChunkDiskMapper{
-		dir:         dirFile,
-		pool:        pool,
-		crc32:       newCRC32(),
-		chunkBuffer: newChunkBuffer(),
+		dir:             dirFile,
+		pool:            pool,
+		writeBufferSize: writeBufferSize,
+		crc32:           newCRC32(),
+		chunkBuffer:     newChunkBuffer(),
+	}
+
+	if writeQueueSize > 0 {
+		m.writeQueue = newChunkWriteQueue(reg, writeQueueSize, m.writeChunk)
 	}
 
 	if m.pool == nil {
@@ -148,15 +277,33 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool) (*ChunkDiskMapper, error
 	return m, m.openMMapFiles()
 }
 
+// Chunk encodings for out-of-order chunks.
+// These encodings must be only used by the Head block for its internal bookkeeping.
+const (
+	OutOfOrderMask = uint8(0b10000000)
+)
+
+func (cdm *ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+	enc := uint8(sourceEncoding) | OutOfOrderMask
+	return chunkenc.Encoding(enc)
+}
+
+func (cdm *ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool {
+	return (uint8(e) & OutOfOrderMask) != 0
+}
+
+func (cdm *ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+	restored := uint8(sourceEncoding) & (^OutOfOrderMask)
+	return chunkenc.Encoding(restored)
+}
+
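// The mask arithmetic on a concrete value: chunkenc.EncXOR is 1 (0b00000001),
// so an out-of-order XOR chunk is stored with encoding byte 0b10000001 (0x81):
//
//	enc := cdm.ApplyOutOfOrderMask(chunkenc.EncXOR) // 0x81
//	cdm.IsOutOfOrderChunk(enc)                      // true
//	cdm.RemoveMasks(enc) == chunkenc.EncXOR         // true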
+// openMMapFiles opens all files within dir for mmapping.
 func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
 	cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{}
 	cdm.closers = map[int]io.Closer{}
 	defer func() {
 		if returnErr != nil {
-			var merr tsdb_errors.MultiError
-			merr.Add(returnErr)
-			merr.Add(closeAllFromMap(cdm.closers))
-			returnErr = merr.Err()
+			returnErr = tsdb_errors.NewMulti(returnErr, closeAllFromMap(cdm.closers)).Err()
 
 			cdm.mmappedChunkFiles = nil
 			cdm.closers = nil
@@ -177,50 +324,48 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
 	for seq, fn := range files {
 		f, err := fileutil.OpenMmapFile(fn)
 		if err != nil {
-			return errors.Wrapf(err, "mmap files, file: %s", fn)
+			return fmt.Errorf("mmap files, file: %s: %w", fn, err)
 		}
 		cdm.closers[seq] = f
 		cdm.mmappedChunkFiles[seq] = &mmappedChunkFile{byteSlice: realByteSlice(f.Bytes())}
 		chkFileIndices = append(chkFileIndices, seq)
 	}
 
-	cdm.size.Store(int64(0))
-
 	// Check for gaps in the files.
-	sort.Ints(chkFileIndices)
+	slices.Sort(chkFileIndices)
 	if len(chkFileIndices) == 0 {
 		return nil
 	}
 	lastSeq := chkFileIndices[0]
 	for _, seq := range chkFileIndices[1:] {
 		if seq != lastSeq+1 {
-			return errors.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq)
+			return fmt.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq)
 		}
 		lastSeq = seq
 	}
 
 	for i, b := range cdm.mmappedChunkFiles {
 		if b.byteSlice.Len() < HeadChunkFileHeaderSize {
-			return errors.Wrapf(errInvalidSize, "%s: invalid head chunk file header", files[i])
+			return fmt.Errorf("%s: invalid head chunk file header: %w", files[i], errInvalidSize)
 		}
 		// Verify magic number.
 		if m := binary.BigEndian.Uint32(b.byteSlice.Range(0, MagicChunksSize)); m != MagicHeadChunks {
-			return errors.Errorf("%s: invalid magic number %x", files[i], m)
+			return fmt.Errorf("%s: invalid magic number %x", files[i], m)
 		}
 
 		// Verify chunk format version.
 		if v := int(b.byteSlice.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
-			return errors.Errorf("%s: invalid chunk format version %d", files[i], v)
+			return fmt.Errorf("%s: invalid chunk format version %d", files[i], v)
 		}
-
-		cdm.size.Add(int64(b.byteSlice.Len()))
 	}
 
+	cdm.evtlPos.setSeq(uint64(lastSeq))
+
 	return nil
 }
 
 func listChunkFiles(dir string) (map[int]string, error) {
-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return nil, err
 	}
@@ -236,8 +381,35 @@ func listChunkFiles(dir string) (map[int]string, error) {
 	return res, nil
 }
 
+// HardLinkChunkFiles creates hardlinks for chunk files from src to dst.
+// It does nothing if src doesn't exist, and it creates dst if dst doesn't exist.
+func HardLinkChunkFiles(src, dst string) error {
+	_, err := os.Stat(src)
+	if os.IsNotExist(err) {
+		return nil
+	}
+	if err != nil {
+		return fmt.Errorf("check source chunks dir: %w", err)
+	}
+	if err := os.MkdirAll(dst, 0o777); err != nil {
+		return fmt.Errorf("set up destination chunks dir: %w", err)
+	}
+	files, err := listChunkFiles(src)
+	if err != nil {
+		return fmt.Errorf("list chunks: %w", err)
+	}
+	for _, filePath := range files {
+		_, fileName := filepath.Split(filePath)
+		err := os.Link(filepath.Join(src, fileName), filepath.Join(dst, fileName))
+		if err != nil {
+			return fmt.Errorf("hardlink a chunk: %w", err)
+		}
+	}
+	return nil
+}
+
 // repairLastChunkFile deletes the last file if it's empty.
-// Because we don't fsync when creating these file, we could end
+// Because we don't fsync when creating these files, we could end
 // up with an empty file at the end during an abrupt shutdown.
 func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr error) {
 	lastFile := -1
@@ -251,14 +423,28 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro
 		return files, nil
 	}
 
-	info, err := os.Stat(files[lastFile])
+	f, err := os.Open(files[lastFile])
 	if err != nil {
-		return files, errors.Wrap(err, "file stat during last head chunk file repair")
+		return files, fmt.Errorf("open file during last head chunk file repair: %w", err)
+	}
+
+	buf := make([]byte, MagicChunksSize)
+	size, err := f.Read(buf)
+	if err != nil && !errors.Is(err, io.EOF) {
+		return files, fmt.Errorf("failed to read magic number during last head chunk file repair: %w", err)
 	}
-	if info.Size() == 0 {
+	if err := f.Close(); err != nil {
+		return files, fmt.Errorf("close file during last head chunk file repair: %w", err)
+	}
+
+	// We either don't have enough bytes for the magic number or the magic number is 0.
+	// NOTE: we should not check for a wrong magic number here because that error
+	// needs to be sent up to the caller (already done elsewhere)
+	// for the proper repair mechanism to happen in the Head.
+	if size < MagicChunksSize || binary.BigEndian.Uint32(buf) == 0 {
 		// Corrupt file, hence remove it.
 		if err := os.RemoveAll(files[lastFile]); err != nil {
-			return files, errors.Wrap(err, "delete corrupted, empty head chunk file during last file repair")
+			return files, fmt.Errorf("delete corrupted, empty head chunk file during last file repair: %w", err)
 		}
 		delete(files, lastFile)
 	}
@@ -266,119 +452,172 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro
 	return files, nil
 }
 
-// WriteChunk writes the chunk to the disk.
+// WriteChunk writes the chunk to disk.
 // The returned chunk ref is the reference from where the chunk encoding starts for the chunk.
-func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk chunkenc.Chunk) (chkRef uint64, err error) {
+func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef ChunkDiskMapperRef) {
+	// cdm.evtlPosMtx must be held to serialize the calls to cdm.evtlPos.getNextChunkRef() and the writing of the chunk (either with or without queue).
+	cdm.evtlPosMtx.Lock()
+	defer cdm.evtlPosMtx.Unlock()
+	ref, cutFile := cdm.evtlPos.getNextChunkRef(chk)
+
+	if cdm.writeQueue != nil {
+		return cdm.writeChunkViaQueue(ref, isOOO, cutFile, seriesRef, mint, maxt, chk, callback)
+	}
+
+	err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile)
+	if callback != nil {
+		callback(err)
+	}
+
+	return ref
+}
+
+func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, isOOO, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
+	var err error
+	if callback != nil {
+		defer func() {
+			if err != nil {
+				callback(err)
+			}
+		}()
+	}
+
+	err = cdm.writeQueue.addJob(chunkWriteJob{
+		cutFile:   cutFile,
+		seriesRef: seriesRef,
+		mint:      mint,
+		maxt:      maxt,
+		chk:       chk,
+		ref:       ref,
+		isOOO:     isOOO,
+		callback:  callback,
+	})
+
+	return ref
+}
+
+func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) (err error) {
 	cdm.writePathMtx.Lock()
 	defer cdm.writePathMtx.Unlock()
 
 	if cdm.closed {
-		return 0, ErrChunkDiskMapperClosed
+		return ErrChunkDiskMapperClosed
 	}
 
-	if cdm.shouldCutNewFile(len(chk.Bytes())) {
-		if err := cdm.cut(); err != nil {
-			return 0, err
+	if cutFile {
+		err := cdm.cutAndExpectRef(ref)
+		if err != nil {
+			return err
 		}
 	}
 
 	// if len(chk.Bytes())+MaxHeadChunkMetaSize >= writeBufferSize, it means that chunk >= the buffer size;
 	// so no need to flush here, as we have to flush at the end (to not keep partial chunks in buffer).
-	if len(chk.Bytes())+MaxHeadChunkMetaSize < writeBufferSize && cdm.chkWriter.Available() < MaxHeadChunkMetaSize+len(chk.Bytes()) {
+	if len(chk.Bytes())+MaxHeadChunkMetaSize < cdm.writeBufferSize && cdm.chkWriter.Available() < MaxHeadChunkMetaSize+len(chk.Bytes()) {
 		if err := cdm.flushBuffer(); err != nil {
-			return 0, err
+			return err
 		}
 	}
 
 	cdm.crc32.Reset()
 	bytesWritten := 0
 
-	// The upper 4 bytes are for the head chunk file index and
-	// the lower 4 bytes are for the head chunk file offset where to start reading this chunk.
-	chkRef = chunkRef(uint64(cdm.curFileSequence), uint64(cdm.curFileSize()))
-
-	binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], seriesRef)
+	binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(seriesRef))
 	bytesWritten += SeriesRefSize
 	binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(mint))
 	bytesWritten += MintMaxtSize
 	binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(maxt))
 	bytesWritten += MintMaxtSize
-	cdm.byteBuf[bytesWritten] = byte(chk.Encoding())
+	enc := chk.Encoding()
+	if isOOO {
+		enc = cdm.ApplyOutOfOrderMask(enc)
+	}
+	cdm.byteBuf[bytesWritten] = byte(enc)
 	bytesWritten += ChunkEncodingSize
 	n := binary.PutUvarint(cdm.byteBuf[bytesWritten:], uint64(len(chk.Bytes())))
 	bytesWritten += n
 
 	if err := cdm.writeAndAppendToCRC32(cdm.byteBuf[:bytesWritten]); err != nil {
-		return 0, err
+		return err
 	}
 	if err := cdm.writeAndAppendToCRC32(chk.Bytes()); err != nil {
-		return 0, err
+		return err
 	}
 	if err := cdm.writeCRC32(); err != nil {
-		return 0, err
+		return err
 	}
 
 	if maxt > cdm.curFileMaxt {
 		cdm.curFileMaxt = maxt
 	}
 
-	cdm.chunkBuffer.put(chkRef, chk)
+	cdm.chunkBuffer.put(ref, chk)
 
-	if len(chk.Bytes())+MaxHeadChunkMetaSize >= writeBufferSize {
+	if len(chk.Bytes())+MaxHeadChunkMetaSize >= cdm.writeBufferSize {
 		// The chunk was bigger than the buffer itself.
 		// Flushing to not keep partial chunks in buffer.
 		if err := cdm.flushBuffer(); err != nil {
-			return 0, err
+			return err
 		}
 	}
 
-	return chkRef, nil
+	return nil
 }
 
-func chunkRef(seq, offset uint64) (chunkRef uint64) {
-	return (seq << 32) | offset
+// CutNewFile ensures that a new file will be created the next time a chunk is written.
+func (cdm *ChunkDiskMapper) CutNewFile() {
+	cdm.evtlPosMtx.Lock()
+	defer cdm.evtlPosMtx.Unlock()
+
+	cdm.evtlPos.cutFileOnNextChunk()
 }
 
-// shouldCutNewFile decides the cutting of a new file based on time and size retention.
-// Size retention: because depending on the system architecture, there is a limit on how big of a file we can m-map.
-// Time retention: so that we can delete old chunks with some time guarantee in low load environments.
-func (cdm *ChunkDiskMapper) shouldCutNewFile(chunkSize int) bool {
-	return cdm.curFileSize() == 0 || // First head chunk file.
-		cdm.curFileSize()+int64(chunkSize+MaxHeadChunkMetaSize) > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
+func (cdm *ChunkDiskMapper) IsQueueEmpty() bool {
+	if cdm.writeQueue == nil {
+		return true
+	}
+
+	return cdm.writeQueue.queueIsEmpty()
 }
 
-// CutNewFile creates a new m-mapped file.
-func (cdm *ChunkDiskMapper) CutNewFile() (returnErr error) {
-	cdm.writePathMtx.Lock()
-	defer cdm.writePathMtx.Unlock()
+// cutAndExpectRef creates a new m-mapped file.
+// The write lock should be held before calling this.
+// It ensures that the position in the new file matches the given chunk reference; if not, it returns an error.
+func (cdm *ChunkDiskMapper) cutAndExpectRef(chkRef ChunkDiskMapperRef) (err error) {
+	seq, offset, err := cdm.cut()
+	if err != nil {
+		return err
+	}
+
+	if expSeq, expOffset := chkRef.Unpack(); seq != expSeq || offset != expOffset {
+		return fmt.Errorf("expected newly cut file to have sequence:offset %d:%d, got %d:%d", expSeq, expOffset, seq, offset)
+	}
 
-	return cdm.cut()
+	return nil
 }
 
 // cut creates a new m-mapped file. The write lock should be held before calling this.
-func (cdm *ChunkDiskMapper) cut() (returnErr error) {
+// It returns the file sequence and the offset in that file to start writing chunks.
+func (cdm *ChunkDiskMapper) cut() (seq, offset int, returnErr error) {
 	// Sync current tail to disk and close.
 	if err := cdm.finalizeCurFile(); err != nil {
-		return err
+		return 0, 0, err
 	}
 
-	n, newFile, seq, err := cutSegmentFile(cdm.dir, MagicHeadChunks, headChunksFormatV1, HeadChunkFilePreallocationSize)
+	offset, newFile, seq, err := cutSegmentFile(cdm.dir, MagicHeadChunks, headChunksFormatV1, HeadChunkFilePreallocationSize)
 	if err != nil {
-		return err
+		return 0, 0, err
 	}
+
 	defer func() {
 		// The file should not be closed if there is no error;
 		// it's kept open in the ChunkDiskMapper.
 		if returnErr != nil {
-			var merr tsdb_errors.MultiError
-			merr.Add(returnErr)
-			merr.Add(newFile.Close())
-			returnErr = merr.Err()
+			returnErr = tsdb_errors.NewMulti(returnErr, newFile.Close()).Err()
 		}
 	}()
 
-	cdm.size.Add(cdm.curFileSize())
-	cdm.curFileNumBytes.Store(int64(n))
+	cdm.curFileOffset.Store(uint64(offset))
 
 	if cdm.curFile != nil {
 		cdm.readPathMtx.Lock()
@@ -386,9 +625,9 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) {
 		cdm.readPathMtx.Unlock()
 	}
 
-	mmapFile, err := fileutil.OpenMmapFileWithSize(newFile.Name(), int(MaxHeadChunkFileSize))
+	mmapFile, err := fileutil.OpenMmapFileWithSize(newFile.Name(), MaxHeadChunkFileSize)
 	if err != nil {
-		return err
+		return 0, 0, err
 	}
 
 	cdm.readPathMtx.Lock()
@@ -397,7 +636,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) {
 	if cdm.chkWriter != nil {
 		cdm.chkWriter.Reset(newFile)
 	} else {
-		cdm.chkWriter = bufio.NewWriterSize(newFile, writeBufferSize)
+		cdm.chkWriter = bufio.NewWriterSize(newFile, cdm.writeBufferSize)
 	}
 
 	cdm.closers[cdm.curFileSequence] = mmapFile
@@ -406,7 +645,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) {
 
 	cdm.curFileMaxt = 0
 
-	return nil
+	return seq, offset, nil
 }
 
 // finalizeCurFile writes all pending data to the current tail file,
@@ -429,7 +668,7 @@ func (cdm *ChunkDiskMapper) finalizeCurFile() error {
 
 func (cdm *ChunkDiskMapper) write(b []byte) error {
 	n, err := cdm.chkWriter.Write(b)
-	cdm.curFileNumBytes.Add(int64(n))
+	cdm.curFileOffset.Add(uint64(n))
 	return err
 }
 
@@ -456,28 +695,28 @@ func (cdm *ChunkDiskMapper) flushBuffer() error {
 }
 
 // Chunk returns a chunk from a given reference.
-func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) {
+func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error) {
 	cdm.readPathMtx.RLock()
-	// We hold this read lock for the entire duration because if the Close()
+	// We hold this read lock for the entire duration because if Close()
 	// is called, the data in the byte slice will get corrupted as the mmapped
 	// file will be closed.
 	defer cdm.readPathMtx.RUnlock()
 
-	var (
-		// Get the upper 4 bytes.
-		// These contain the head chunk file index.
-		sgmIndex = int(ref >> 32)
-		// Get the lower 4 bytes.
-		// These contain the head chunk file offset where the chunk starts.
-		// We skip the series ref and the mint/maxt beforehand.
-		chkStart = int((ref<<32)>>32) + SeriesRefSize + (2 * MintMaxtSize)
-		chkCRC32 = newCRC32()
-	)
-
 	if cdm.closed {
 		return nil, ErrChunkDiskMapperClosed
 	}
 
+	if cdm.writeQueue != nil {
+		chunk := cdm.writeQueue.get(ref)
+		if chunk != nil {
+			return chunk, nil
+		}
+	}
+
+	sgmIndex, chkStart := ref.Unpack()
+	// We skip the series ref and the mint/maxt beforehand.
+	chkStart += SeriesRefSize + (2 * MintMaxtSize)
+
 	// If it is the current open file, then the chunks can be in the buffer too.
 	if sgmIndex == cdm.curFileSequence {
 		chunk := cdm.chunkBuffer.get(ref)
@@ -492,13 +731,13 @@ func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) {
 			return nil, &CorruptionErr{
 				Dir:       cdm.dir.Name(),
 				FileIndex: -1,
-				Err:       errors.Errorf("head chunk file index %d more than current open file", sgmIndex),
+				Err:       fmt.Errorf("head chunk file index %d more than current open file", sgmIndex),
 			}
 		}
 		return nil, &CorruptionErr{
 			Dir:       cdm.dir.Name(),
 			FileIndex: sgmIndex,
-			Err:       errors.New("head chunk file index %d does not exist on disk"),
+			Err:       fmt.Errorf("head chunk file index %d does not exist on disk", sgmIndex),
 		}
 	}
 
@@ -506,13 +745,15 @@ func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) {
 		return nil, &CorruptionErr{
 			Dir:       cdm.dir.Name(),
 			FileIndex: sgmIndex,
-			Err:       errors.Errorf("head chunk file doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, mmapFile.byteSlice.Len()),
+			Err:       fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, mmapFile.byteSlice.Len()),
 		}
 	}
 
 	// Encoding.
 	chkEnc := mmapFile.byteSlice.Range(chkStart, chkStart+ChunkEncodingSize)[0]
-
+	sourceChkEnc := chunkenc.Encoding(chkEnc)
+	// Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
+	chkEnc = byte(cdm.RemoveMasks(sourceChkEnc))
 	// Data length.
 	// With the minimum chunk length this should never cause us reading
 	// over the end of the slice.
@@ -523,7 +764,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) {
 		return nil, &CorruptionErr{
 			Dir:       cdm.dir.Name(),
 			FileIndex: sgmIndex,
-			Err:       errors.Errorf("reading chunk length failed with %d", n),
+			Err:       fmt.Errorf("reading chunk length failed with %d", n),
 		}
 	}
 
@@ -533,30 +774,30 @@ func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) {
 		return nil, &CorruptionErr{
 			Dir:       cdm.dir.Name(),
 			FileIndex: sgmIndex,
-			Err:       errors.Errorf("head chunk file doesn't include enough bytes to read the chunk - required:%v, available:%v", chkDataEnd, mmapFile.byteSlice.Len()),
+			Err:       fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk - required:%v, available:%v", chkDataEnd, mmapFile.byteSlice.Len()),
 		}
 	}
 
 	// Check the CRC.
 	sum := mmapFile.byteSlice.Range(chkDataEnd, chkDataEnd+CRCSize)
-	if _, err := chkCRC32.Write(mmapFile.byteSlice.Range(chkStart-(SeriesRefSize+2*MintMaxtSize), chkDataEnd)); err != nil {
+	if err := checkCRC32(mmapFile.byteSlice.Range(chkStart-(SeriesRefSize+2*MintMaxtSize), chkDataEnd), sum); err != nil {
 		return nil, &CorruptionErr{
 			Dir:       cdm.dir.Name(),
 			FileIndex: sgmIndex,
 			Err:       err,
 		}
 	}
-	if act := chkCRC32.Sum(nil); !bytes.Equal(act, sum) {
-		return nil, &CorruptionErr{
-			Dir:       cdm.dir.Name(),
-			FileIndex: sgmIndex,
-			Err:       errors.Errorf("checksum mismatch expected:%x, actual:%x", sum, act),
-		}
-	}
 
 	// The chunk data itself.
 	chkData := mmapFile.byteSlice.Range(chkDataEnd-int(chkDataLen), chkDataEnd)
-	chk, err := cdm.pool.Get(chunkenc.Encoding(chkEnc), chkData)
+
+	// Make a copy of the chunk data to prevent a panic occurring because the returned
+	// chunk data slice references an mmap-ed file which could be closed after the
+	// function returns but while the chunk is still in use.
+	chkDataCopy := make([]byte, len(chkData))
+	copy(chkDataCopy, chkData)
+
+	chk, err := cdm.pool.Get(chunkenc.Encoding(chkEnc), chkDataCopy)
 	if err != nil {
 		return nil, &CorruptionErr{
 			Dir:       cdm.dir.Name(),
@@ -567,11 +808,11 @@ func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) {
 	return chk, nil
 }
 
-// IterateAllChunks iterates on all the chunks in its byte slices in the order of the head chunk file sequence
-// and runs the provided function on each chunk. It returns on the first error encountered.
+// IterateAllChunks iterates over all mmappedChunkFiles (in order of head chunk file name/number) and all the chunks within them,
+// and runs the provided function with information about each chunk. It returns on the first error encountered.
 // NOTE: This method needs to be called at least once after creating ChunkDiskMapper
-// to set the maxt of all the file.
-func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64, mint, maxt int64, numSamples uint16) error) (err error) {
+// to set the maxt of all files.
+func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error) {
 	cdm.writePathMtx.Lock()
 	defer cdm.writePathMtx.Unlock()
 
@@ -579,14 +820,12 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
 		cdm.fileMaxtSet = true
 	}()
 
-	chkCRC32 := newCRC32()
-
 	// Iterate files in ascending order.
 	segIDs := make([]int, 0, len(cdm.mmappedChunkFiles))
 	for seg := range cdm.mmappedChunkFiles {
 		segIDs = append(segIDs, seg)
 	}
-	sort.Ints(segIDs)
+	slices.Sort(segIDs)
 	for _, segID := range segIDs {
 		mmapFile := cdm.mmappedChunkFiles[segID]
 		fileEnd := mmapFile.byteSlice.Len()
@@ -605,19 +844,20 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
 					}
 				}
 				if allZeros {
+					// End of segment chunk file content.
 					break
 				}
 				return &CorruptionErr{
 					Dir:       cdm.dir.Name(),
 					FileIndex: segID,
-					Err:       errors.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID),
+					Err: fmt.Errorf("head chunk file has some unread data, but doesn't include enough bytes to read the chunk header"+
+						" - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID),
 				}
 			}
-			chkCRC32.Reset()
-			chunkRef := chunkRef(uint64(segID), uint64(idx))
+			chunkRef := newChunkDiskMapperRef(uint64(segID), uint64(idx))
 
 			startIdx := idx
-			seriesRef := binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+SeriesRefSize))
+			seriesRef := HeadSeriesRef(binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+SeriesRefSize)))
 			idx += SeriesRefSize
 			mint := int64(binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+MintMaxtSize)))
 			idx += MintMaxtSize
@@ -632,7 +872,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
 				break
 			}
 
-			idx += ChunkEncodingSize // Skip encoding.
+			chkEnc := chunkenc.Encoding(mmapFile.byteSlice.Range(idx, idx+ChunkEncodingSize)[0])
+			idx += ChunkEncodingSize
 			dataLen, n := binary.Uvarint(mmapFile.byteSlice.Range(idx, idx+MaxChunkLengthFieldSize))
 			idx += n
 
@@ -645,20 +886,17 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
 				return &CorruptionErr{
 					Dir:       cdm.dir.Name(),
 					FileIndex: segID,
-					Err:       errors.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+CRCSize, fileEnd, segID),
+					Err:       fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+CRCSize, fileEnd, segID),
 				}
 			}
 
 			// Check CRC.
 			sum := mmapFile.byteSlice.Range(idx, idx+CRCSize)
-			if _, err := chkCRC32.Write(mmapFile.byteSlice.Range(startIdx, idx)); err != nil {
-				return err
-			}
-			if act := chkCRC32.Sum(nil); !bytes.Equal(act, sum) {
+			if err := checkCRC32(mmapFile.byteSlice.Range(startIdx, idx), sum); err != nil {
 				return &CorruptionErr{
 					Dir:       cdm.dir.Name(),
 					FileIndex: segID,
-					Err:       errors.Errorf("checksum mismatch expected:%x, actual:%x", sum, act),
+					Err:       err,
 				}
 			}
 			idx += CRCSize
@@ -666,9 +904,12 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
 			if maxt > mmapFile.maxt {
 				mmapFile.maxt = maxt
 			}
-
-			if err := f(seriesRef, chunkRef, mint, maxt, numSamples); err != nil {
-				if cerr, ok := err.(*CorruptionErr); ok {
+			isOOO := cdm.IsOutOfOrderChunk(chkEnc)
+			// Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
+			chkEnc = cdm.RemoveMasks(chkEnc)
+			if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil {
+				var cerr *CorruptionErr
+				if errors.As(err, &cerr) {
 					cerr.Dir = cdm.dir.Name()
 					cerr.FileIndex = segID
 					return cerr
@@ -682,7 +923,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
 			return &CorruptionErr{
 				Dir:       cdm.dir.Name(),
 				FileIndex: segID,
-				Err:       errors.Errorf("head chunk file doesn't include enough bytes to read the last chunk data - required:%v, available:%v, file:%d", idx, fileEnd, segID),
+				Err:       fmt.Errorf("head chunk file doesn't include enough bytes to read the last chunk data - required:%v, available:%v, file:%d", idx, fileEnd, segID),
 			}
 		}
 	}
@@ -690,12 +931,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
 	return nil
 }
 
-// Truncate deletes the head chunk files which are strictly below the mint.
-// mint should be in milliseconds.
-func (cdm *ChunkDiskMapper) Truncate(mint int64) error {
-	if !cdm.fileMaxtSet {
-		return errors.New("maxt of the files are not set")
-	}
+// Truncate deletes the head chunk files with numbers less than the given fileNo.
+func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error {
 	cdm.readPathMtx.RLock()
 
 	// Sort the file indices, else if files deletion fails in between,
@@ -704,85 +941,133 @@ func (cdm *ChunkDiskMapper) Truncate(mint int64) error {
 	for seq := range cdm.mmappedChunkFiles {
 		chkFileIndices = append(chkFileIndices, seq)
 	}
-	sort.Ints(chkFileIndices)
+	slices.Sort(chkFileIndices)
 
 	var removedFiles []int
 	for _, seq := range chkFileIndices {
-		if seq == cdm.curFileSequence || cdm.mmappedChunkFiles[seq].maxt >= mint {
+		if seq == cdm.curFileSequence || uint32(seq) >= fileNo {
 			break
 		}
-		if cdm.mmappedChunkFiles[seq].maxt < mint {
-			removedFiles = append(removedFiles, seq)
-		}
+		removedFiles = append(removedFiles, seq)
 	}
 	cdm.readPathMtx.RUnlock()
 
-	var merr tsdb_errors.MultiError
+	errs := tsdb_errors.NewMulti()
 	// Cut a new file only if the current file has some chunks.
 	if cdm.curFileSize() > HeadChunkFileHeaderSize {
-		merr.Add(cdm.CutNewFile())
+		// There is a known race condition here: between the check of curFileSize() and the call to CutNewFile(),
+		// a new file could already be cut. This is acceptable because it simply results in an empty file,
+		// which does no harm.
+		cdm.CutNewFile()
 	}
-	merr.Add(cdm.deleteFiles(removedFiles))
-	return merr.Err()
+	pendingDeletes, err := cdm.deleteFiles(removedFiles)
+	errs.Add(err)
+
+	if len(chkFileIndices) == len(removedFiles) {
+		// All files were deleted. Reset the current sequence.
+		cdm.evtlPosMtx.Lock()
+
+		// We can safely reset the sequence only if the write queue is empty. If it's not empty,
+		// then there may be a job in the queue that will create a new segment file with an ID
+		// generated before the sequence reset.
+		//
+		// The queueIsEmpty() function must be called while holding the cdm.evtlPosMtx to avoid
+		// a race condition with WriteChunk().
+		if cdm.writeQueue == nil || cdm.writeQueue.queueIsEmpty() {
+			if err == nil {
+				cdm.evtlPos.setSeq(0)
+			} else {
+				// In case of error, set it to the last file number on the disk that was not deleted.
+				cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1]))
+			}
+		}
+
+		cdm.evtlPosMtx.Unlock()
+	}
+
+	return errs.Err()
 }
 
-func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) error {
+// deleteFiles deletes the given file sequences in ascending order of sequence number.
+// In case of an error, it returns the sorted file sequences that were not deleted from the _disk_.
+func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) ([]int, error) {
+	slices.Sort(removedFiles) // To delete them in order.
 	cdm.readPathMtx.Lock()
 	for _, seq := range removedFiles {
 		if err := cdm.closers[seq].Close(); err != nil {
 			cdm.readPathMtx.Unlock()
-			return err
+			return removedFiles, err
 		}
-		cdm.size.Sub(int64(cdm.mmappedChunkFiles[seq].byteSlice.Len()))
 		delete(cdm.mmappedChunkFiles, seq)
 		delete(cdm.closers, seq)
 	}
 	cdm.readPathMtx.Unlock()
 
 	// We actually delete the files separately to not block the readPathMtx for long.
-	for _, seq := range removedFiles {
+	for i, seq := range removedFiles {
 		if err := os.Remove(segmentFile(cdm.dir.Name(), seq)); err != nil {
-			return err
+			return removedFiles[i:], err
 		}
 	}
 
-	return nil
+	return nil, nil
 }
 
 // DeleteCorrupted deletes all the head chunk files after the one which had the corruption
 // (including the corrupt file).
 func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error {
-	err := errors.Cause(originalErr) // So that we can pick up errors even if wrapped.
-	cerr, ok := err.(*CorruptionErr)
-	if !ok {
-		return errors.Wrap(originalErr, "cannot handle error")
+	var cerr *CorruptionErr
+	if !errors.As(originalErr, &cerr) {
+		return fmt.Errorf("cannot handle error: %w", originalErr)
 	}
 
 	// Delete all the head chunk files following the corrupt head chunk file.
 	segs := []int{}
 	cdm.readPathMtx.RLock()
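+	// Track the highest sequence that survives the deletion so evtlPos can be reset to it below.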
+	lastSeq := 0
 	for seg := range cdm.mmappedChunkFiles {
-		if seg >= cerr.FileIndex {
+		switch {
+		case seg >= cerr.FileIndex:
 			segs = append(segs, seg)
+		case seg > lastSeq:
+			lastSeq = seg
 		}
 	}
 	cdm.readPathMtx.RUnlock()
 
-	return cdm.deleteFiles(segs)
+	pendingDeletes, err := cdm.deleteFiles(segs)
+	cdm.evtlPosMtx.Lock()
+	if err == nil {
+		cdm.evtlPos.setSeq(uint64(lastSeq))
+	} else {
+		// In case of error, set it to the last file number on the disk that was not deleted.
+		cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1]))
+	}
+	cdm.evtlPosMtx.Unlock()
+
+	return err
 }
 
 // Size returns the size of the chunk files.
-func (cdm *ChunkDiskMapper) Size() int64 {
-	return cdm.size.Load() + cdm.curFileSize()
+func (cdm *ChunkDiskMapper) Size() (int64, error) {
+	return fileutil.DirSize(cdm.dir.Name())
 }
 
-func (cdm *ChunkDiskMapper) curFileSize() int64 {
-	return cdm.curFileNumBytes.Load()
+func (cdm *ChunkDiskMapper) curFileSize() uint64 {
+	return cdm.curFileOffset.Load()
 }
 
 // Close closes all the open files in ChunkDiskMapper.
 // It is no longer safe to access chunks from this struct after calling Close.
 func (cdm *ChunkDiskMapper) Close() error {
+	// Locking the eventual position lock blocks WriteChunk().
+	cdm.evtlPosMtx.Lock()
+	defer cdm.evtlPosMtx.Unlock()
+
+	if cdm.writeQueue != nil {
+		cdm.writeQueue.stop()
+	}
+
 	// 'WriteChunk' locks writePathMtx first and then readPathMtx for cutting head chunk file.
 	// The lock order should not be reversed here else it can cause deadlocks.
 	cdm.writePathMtx.Lock()
@@ -795,42 +1080,42 @@ func (cdm *ChunkDiskMapper) Close() error {
 	}
 	cdm.closed = true
 
-	var merr tsdb_errors.MultiError
-	merr.Add(closeAllFromMap(cdm.closers))
-	merr.Add(cdm.finalizeCurFile())
-	merr.Add(cdm.dir.Close())
-
+	errs := tsdb_errors.NewMulti(
+		closeAllFromMap(cdm.closers),
+		cdm.finalizeCurFile(),
+		cdm.dir.Close(),
+	)
 	cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{}
 	cdm.closers = map[int]io.Closer{}
 
-	return merr.Err()
+	return errs.Err()
 }
 
 func closeAllFromMap(cs map[int]io.Closer) error {
-	var merr tsdb_errors.MultiError
+	errs := tsdb_errors.NewMulti()
 	for _, c := range cs {
-		merr.Add(c.Close())
+		errs.Add(c.Close())
 	}
-	return merr.Err()
+	return errs.Err()
 }
 
 const inBufferShards = 128 // 128 is a randomly chosen number.
 
-// chunkBuffer is a thread safe buffer for chunks.
+// chunkBuffer is a thread-safe lookup table for chunks by their ref.
 type chunkBuffer struct {
-	inBufferChunks     [inBufferShards]map[uint64]chunkenc.Chunk
+	inBufferChunks     [inBufferShards]map[ChunkDiskMapperRef]chunkenc.Chunk
 	inBufferChunksMtxs [inBufferShards]sync.RWMutex
 }
 
 func newChunkBuffer() *chunkBuffer {
 	cb := &chunkBuffer{}
 	for i := 0; i < inBufferShards; i++ {
-		cb.inBufferChunks[i] = make(map[uint64]chunkenc.Chunk)
+		cb.inBufferChunks[i] = make(map[ChunkDiskMapperRef]chunkenc.Chunk)
 	}
 	return cb
 }
 
-func (cb *chunkBuffer) put(ref uint64, chk chunkenc.Chunk) {
+func (cb *chunkBuffer) put(ref ChunkDiskMapperRef, chk chunkenc.Chunk) {
 	shardIdx := ref % inBufferShards
 
 	cb.inBufferChunksMtxs[shardIdx].Lock()
@@ -838,7 +1123,7 @@ func (cb *chunkBuffer) put(ref uint64, chk chunkenc.Chunk) {
 	cb.inBufferChunksMtxs[shardIdx].Unlock()
 }
 
-func (cb *chunkBuffer) get(ref uint64) chunkenc.Chunk {
+func (cb *chunkBuffer) get(ref ChunkDiskMapperRef) chunkenc.Chunk {
 	shardIdx := ref % inBufferShards
 
 	cb.inBufferChunksMtxs[shardIdx].RLock()
@@ -850,7 +1135,7 @@ func (cb *chunkBuffer) get(ref uint64) chunkenc.Chunk {
 func (cb *chunkBuffer) clear() {
 	for i := 0; i < inBufferShards; i++ {
 		cb.inBufferChunksMtxs[i].Lock()
-		cb.inBufferChunks[i] = make(map[uint64]chunkenc.Chunk)
+		cb.inBufferChunks[i] = make(map[ChunkDiskMapperRef]chunkenc.Chunk)
 		cb.inBufferChunksMtxs[i].Unlock()
 	}
 }
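The new ChunkDiskMapperRef keeps the packing scheme the old chunkRef helper used: the upper 4 bytes hold the head chunk file sequence and the lower 4 bytes the byte offset within that file. Below is a minimal standalone sketch of that layout; the real newChunkDiskMapperRef and Unpack live in the chunks package, and the names here only mirror them.

```go
package main

import "fmt"

// chunkDiskMapperRef mirrors the 4-byte/4-byte layout described in the diff above.
type chunkDiskMapperRef uint64

// newChunkDiskMapperRef packs the file sequence into the upper 32 bits
// and the in-file offset into the lower 32 bits.
func newChunkDiskMapperRef(seq, offset uint64) chunkDiskMapperRef {
	return chunkDiskMapperRef((seq << 32) | offset)
}

// Unpack recovers the file sequence and offset from the packed reference.
func (ref chunkDiskMapperRef) Unpack() (seq, offset int) {
	seq = int(ref >> 32)
	offset = int((ref << 32) >> 32)
	return seq, offset
}

func main() {
	ref := newChunkDiskMapperRef(3, 8) // file sequence 3, byte offset 8
	seq, offset := ref.Unpack()
	fmt.Printf("seq=%d offset=%d\n", seq, offset) // seq=3 offset=8
}
```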
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_other.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_other.go
index a1de87370758d8bf7a378f4052c8bd04fd73f917..f30c5e55e97102dfef725232ea4f67ca8b8121ce 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_other.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_other.go
@@ -11,12 +11,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build !windows
+//go:build !windows
 
 package chunks
 
-var (
-	// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
-	// Windows needs pre-allocations while the other OS does not.
-	HeadChunkFilePreallocationSize int64
-)
+// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
+// Windows needs pre-allocation while other OSes do not. However, we observed that a zero pre-allocation causes unit tests to flake.
+// This small allocation for non-Windows OSes removes the flakiness.
+var HeadChunkFilePreallocationSize int64 = MinWriteBufferSize * 2
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_windows.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_windows.go
index b772b64b468efb32ac6c835d46efc507e0a06496..214ee42f5965fade822e31b7f3215bb1a6b6c6c7 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_windows.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks_windows.go
@@ -13,8 +13,6 @@
 
 package chunks
 
-var (
-	// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
-	// Windows needs pre-allocation to m-map the file.
-	HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize
-)
+// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
+// Windows needs pre-allocation to m-map the file.
+var HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/queue.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/queue.go
new file mode 100644
index 0000000000000000000000000000000000000000..860381a5fef77dc62eb6df763bf1d30b5d2ace99
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/queue.go
@@ -0,0 +1,141 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunks
+
+import "sync"
+
+// writeJobQueue is similar to a buffered channel of chunkWriteJob, but it manages its own buffers
+// to avoid using a lot of memory when it's empty. It does that by storing elements in segments
+// of equal size (segmentSize). When a segment is no longer used, references to it are removed,
+// so it can be garbage collected.
+type writeJobQueue struct {
+	maxSize     int
+	segmentSize int
+
+	mtx            sync.Mutex            // protects all following variables
+	pushed, popped *sync.Cond            // signalled when something is pushed into the queue or popped from it
+	first, last    *writeJobQueueSegment // pointer to first and last segment, if any
+	size           int                   // total size of the queue
+	closed         bool                  // after closing the queue, nothing can be pushed to it
+}
+
+type writeJobQueueSegment struct {
+	segment             []chunkWriteJob
+	nextRead, nextWrite int                   // index of next read and next write in this segment.
+	nextSegment         *writeJobQueueSegment // next segment, if any
+}
+
+func newWriteJobQueue(maxSize, segmentSize int) *writeJobQueue {
+	if maxSize <= 0 || segmentSize <= 0 {
+		panic("invalid queue")
+	}
+
+	q := &writeJobQueue{
+		maxSize:     maxSize,
+		segmentSize: segmentSize,
+	}
+
+	q.pushed = sync.NewCond(&q.mtx)
+	q.popped = sync.NewCond(&q.mtx)
+	return q
+}
+
+func (q *writeJobQueue) close() {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+
+	q.closed = true
+
+	// Unblock all blocked goroutines.
+	q.pushed.Broadcast()
+	q.popped.Broadcast()
+}
+
+// push blocks until there is space available in the queue, and then adds the job to the queue.
+// If the queue is closed, or gets closed while waiting for space, push returns false.
+func (q *writeJobQueue) push(job chunkWriteJob) bool {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+
+	// Wait until queue has more space or is closed.
+	for !q.closed && q.size >= q.maxSize {
+		q.popped.Wait()
+	}
+
+	if q.closed {
+		return false
+	}
+
+	// Check if this segment has more space for writing, and create new one if not.
+	if q.last == nil || q.last.nextWrite >= q.segmentSize {
+		prevLast := q.last
+		q.last = &writeJobQueueSegment{
+			segment: make([]chunkWriteJob, q.segmentSize),
+		}
+
+		if prevLast != nil {
+			prevLast.nextSegment = q.last
+		}
+		if q.first == nil {
+			q.first = q.last
+		}
+	}
+
+	q.last.segment[q.last.nextWrite] = job
+	q.last.nextWrite++
+	q.size++
+	q.pushed.Signal()
+	return true
+}
+
+// pop returns the first job from the queue, and true.
+// If the queue is empty, pop blocks until there is a job (returning true) or until the queue is closed (returning false).
+// If the queue was already closed, pop first returns all remaining elements from the queue (with true), and only then returns false.
+func (q *writeJobQueue) pop() (chunkWriteJob, bool) {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+
+	// Wait until something is pushed to the queue, or the queue is closed.
+	for q.size == 0 {
+		if q.closed {
+			return chunkWriteJob{}, false
+		}
+
+		q.pushed.Wait()
+	}
+
+	res := q.first.segment[q.first.nextRead]
+	q.first.segment[q.first.nextRead] = chunkWriteJob{} // clear just-read element
+	q.first.nextRead++
+	q.size--
+
+	// If we have read all possible elements from first segment, we can drop it.
+	if q.first.nextRead >= q.segmentSize {
+		q.first = q.first.nextSegment
+		if q.first == nil {
+			q.last = nil
+		}
+	}
+
+	q.popped.Signal()
+	return res, true
+}
+
+// length returns the number of jobs currently in the queue.
+func (q *writeJobQueue) length() int {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+
+	return q.size
+}
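For reference, here is a hypothetical package-internal snippet showing how the queue above is meant to be used; it touches unexported types, so it would have to live inside package chunks (e.g. in a test), and the function name is ours. push blocks once maxSize jobs are in flight, and after close() the consumer drains the remaining jobs before pop reports false.

```go
package chunks

import "fmt"

func exampleWriteJobQueue() {
	q := newWriteJobQueue(100, 16) // at most 100 queued jobs, stored in segments of 16

	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			job, ok := q.pop() // blocks until a job arrives, or the queue is closed and drained
			if !ok {
				return
			}
			_ = job // a real consumer would write the chunk to disk here
		}
	}()

	for i := 0; i < 10; i++ {
		if !q.push(chunkWriteJob{}) { // returns false only if the queue was closed
			break
		}
	}
	fmt.Println("queued:", q.length())

	q.close()
	<-done
}
```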
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/samples.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/samples.go
new file mode 100644
index 0000000000000000000000000000000000000000..a5b16094df49aa5954b526db6874ba4544b0ca6c
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/samples.go
@@ -0,0 +1,101 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunks
+
+import (
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+)
+
+type Samples interface {
+	Get(i int) Sample
+	Len() int
+}
+
+type Sample interface {
+	T() int64
+	F() float64
+	H() *histogram.Histogram
+	FH() *histogram.FloatHistogram
+	Type() chunkenc.ValueType
+	Copy() Sample // Returns a deep copy.
+}
+
+type SampleSlice []Sample
+
+func (s SampleSlice) Get(i int) Sample { return s[i] }
+func (s SampleSlice) Len() int         { return len(s) }
+
+type sample struct {
+	t  int64
+	f  float64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
+}
+
+func (s sample) T() int64 {
+	return s.t
+}
+
+func (s sample) F() float64 {
+	return s.f
+}
+
+func (s sample) H() *histogram.Histogram {
+	return s.h
+}
+
+func (s sample) FH() *histogram.FloatHistogram {
+	return s.fh
+}
+
+func (s sample) Type() chunkenc.ValueType {
+	switch {
+	case s.h != nil:
+		return chunkenc.ValHistogram
+	case s.fh != nil:
+		return chunkenc.ValFloatHistogram
+	default:
+		return chunkenc.ValFloat
+	}
+}
+
+func (s sample) Copy() Sample {
+	c := sample{t: s.t, f: s.f}
+	if s.h != nil {
+		c.h = s.h.Copy()
+	}
+	if s.fh != nil {
+		c.fh = s.fh.Copy()
+	}
+	return c
+}
+
+// GenerateSamples generates numSamples float samples, starting at start and counting up by one.
+func GenerateSamples(start, numSamples int) []Sample {
+	return generateSamples(start, numSamples, func(i int) Sample {
+		return sample{
+			t: int64(i),
+			f: float64(i),
+		}
+	})
+}
+
+func generateSamples(start, numSamples int, gen func(int) Sample) []Sample {
+	samples := make([]Sample, 0, numSamples)
+	for i := start; i < start+numSamples; i++ {
+		samples = append(samples, gen(i))
+	}
+	return samples
+}
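A small usage sketch for the new Samples helpers, assuming the prometheus/prometheus module is on the import path. GenerateSamples produces plain float samples whose timestamp and value both count up from start:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunks"
)

func main() {
	// Ten float samples with t == f == 0..9.
	s := chunks.SampleSlice(chunks.GenerateSamples(0, 10))
	for i := 0; i < s.Len(); i++ {
		smp := s.Get(i)
		fmt.Println(smp.T(), smp.F(), smp.Type()) // Type() is chunkenc.ValFloat here
	}
}
```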
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go
index 69d36624800f49af17f8a4281dceadd6aeb0ac03..a86ce59bd8725f68c014f5818e757f323adf408b 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go
@@ -16,22 +16,62 @@ package errors
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
+	"io"
 )
 
-// The MultiError type implements the error interface, and contains the
-// Errors used to construct it.
-type MultiError []error
+// multiError type allows combining multiple errors into one.
+type multiError []error
 
-// Returns a concatenated string of the contained errors
-func (es MultiError) Error() string {
+// NewMulti returns a multiError with the provided errors added, skipping any that are nil.
+func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return
+	m := multiError{}
+	m.Add(errs...)
+	return m
+}
+
+// Add adds one or more errors to the error list. Each error is added only if it is not nil.
+// If an error is a nonNilMultiError, the errors inside it are flattened into the main multiError.
+func (es *multiError) Add(errs ...error) {
+	for _, err := range errs {
+		if err == nil {
+			continue
+		}
+		var merr nonNilMultiError
+		if errors.As(err, &merr) {
+			*es = append(*es, merr.errs...)
+			continue
+		}
+		*es = append(*es, err)
+	}
+}
+
+// Err returns the error list as an error or nil if it is empty.
+func (es multiError) Err() error {
+	if len(es) == 0 {
+		return nil
+	}
+	return nonNilMultiError{errs: es}
+}
+
+// nonNilMultiError implements the error interface, and it represents
+// multiError with at least one error inside it.
+// This type is needed to make sure that nil is returned when no errors were combined in the multiError,
+// so that err != nil checks work as expected.
+type nonNilMultiError struct {
+	errs multiError
+}
+
+// Error returns a concatenated string of the contained errors.
+func (es nonNilMultiError) Error() string {
 	var buf bytes.Buffer
 
-	if len(es) > 1 {
-		fmt.Fprintf(&buf, "%d errors: ", len(es))
+	if len(es.errs) > 1 {
+		fmt.Fprintf(&buf, "%d errors: ", len(es.errs))
 	}
 
-	for i, err := range es {
+	for i, err := range es.errs {
 		if i != 0 {
 			buf.WriteString("; ")
 		}
@@ -41,22 +81,24 @@ func (es MultiError) Error() string {
 	return buf.String()
 }
 
-// Add adds the error to the error list if it is not nil.
-func (es *MultiError) Add(err error) {
-	if err == nil {
-		return
-	}
-	if merr, ok := err.(MultiError); ok {
-		*es = append(*es, merr...)
-	} else {
-		*es = append(*es, err)
+// Is attempts to match the provided error against errors in the error list.
+//
+// This method allows errors.Is to traverse the values stored in the multiError.
+// It returns true if any of the errors in the list match the target.
+func (es nonNilMultiError) Is(target error) bool {
+	for _, err := range es.errs {
+		if errors.Is(err, target) {
+			return true
+		}
 	}
+	return false
 }
 
-// Err returns the error list as an error or nil if it is empty.
-func (es MultiError) Err() error {
-	if len(es) == 0 {
-		return nil
+// CloseAll closes all given closers while recording errors in a multiError.
+func CloseAll(cs []io.Closer) error {
+	errs := NewMulti()
+	for _, c := range cs {
+		errs.Add(c.Close())
 	}
-	return es
+	return errs.Err()
 }
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go
index e6ac4ec989229614ca51c687ff307c0daf2cdad6..1672a92d4c93a6d89cf2757dd4a010eae26d3c6d 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go
@@ -20,7 +20,7 @@ import (
 
 func DirSize(dir string) (int64, error) {
 	var size int64
-	err := filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error {
+	err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_unix.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_unix.go
index 58a77dfc1a99dadb9ab6ba94949a9ca372cc1d5b..2afb2aeaba0ebb91d27a77c485ae6825815417df 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_unix.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_unix.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build !windows
+//go:build !windows
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_windows.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_windows.go
index c123395c0040e5c326e852a3feeb90074877cc34..307077ebc318d4d73dc926665b8a01d39b8cf64f 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_windows.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir_windows.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build windows
+//go:build windows
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go
index 927ebe004dabbf32655145a7960a6b0143ecd71b..523f99292ce47b49116851bfceaabb40079831a4 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go
@@ -18,7 +18,6 @@
 package fileutil
 
 import (
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -27,7 +26,7 @@ import (
 // CopyDirs copies all directories, subdirectories and files recursively including the empty folders.
 // Source and destination must be full paths.
 func CopyDirs(src, dest string) error {
-	if err := os.MkdirAll(dest, 0777); err != nil {
+	if err := os.MkdirAll(dest, 0o777); err != nil {
 		return err
 	}
 	files, err := readDirs(src)
@@ -46,7 +45,7 @@ func CopyDirs(src, dest string) error {
 
 		// Empty directories are also created.
 		if stat.IsDir() {
-			if err := os.MkdirAll(dp, 0777); err != nil {
+			if err := os.MkdirAll(dp, 0o777); err != nil {
 				return err
 			}
 			continue
@@ -60,12 +59,12 @@ func CopyDirs(src, dest string) error {
 }
 
 func copyFile(src, dest string) error {
-	data, err := ioutil.ReadFile(src)
+	data, err := os.ReadFile(src)
 	if err != nil {
 		return err
 	}
 
-	err = ioutil.WriteFile(dest, data, 0666)
+	err = os.WriteFile(dest, data, 0o666)
 	if err != nil {
 		return err
 	}
@@ -77,7 +76,7 @@ func copyFile(src, dest string) error {
 func readDirs(src string) ([]string, error) {
 	var files []string
 
-	err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error {
+	err := filepath.Walk(src, func(path string, _ os.FileInfo, _ error) error {
 		relativePath := strings.TrimPrefix(path, src)
 		if len(relativePath) > 0 {
 			files = append(files, relativePath)
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock.go
index d5eaa7ca2adf8d59e8f249e0f59c87ac7e5ab0df..e0082e2f2cf016f566e3fe0382e39e0f8beb66ac 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock.go
@@ -29,7 +29,7 @@ type Releaser interface {
 // locking has failed. Neither this function nor the returned Releaser is
 // goroutine-safe.
 func Flock(fileName string) (r Releaser, existed bool, err error) {
-	if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil {
+	if err = os.MkdirAll(filepath.Dir(fileName), 0o755); err != nil {
 		return nil, false, err
 	}
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go
new file mode 100644
index 0000000000000000000000000000000000000000..6029cdf4d82cbade3baef8f9b49534c8f506dbcf
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js
+
+package fileutil
+
+import "errors"
+
+type unixLock struct{}
+
+func (l *unixLock) Release() error {
+	return errors.New("unsupported")
+}
+
+func (l *unixLock) set(lock bool) error {
+	return errors.New("unsupported")
+}
+
+func newLock(fileName string) (Releaser, error) {
+	return nil, errors.New("unsupported")
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_plan9.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_plan9.go
index 71ed67e8c3e8f2f940e0aae40a0dcff6eb657c73..3b9550e7f234162b38246c7e5a13416c2ac5819d 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_plan9.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_plan9.go
@@ -24,7 +24,7 @@ func (l *plan9Lock) Release() error {
 }
 
 func newLock(fileName string) (Releaser, error) {
-	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0666)
+	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0o666)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_solaris.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_solaris.go
index cfff8e42f8bfdea6cd82a0e9fdbede57fd7823cc..8ca919f3b001e5d77cd939670469eaa1a61fb71a 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_solaris.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_solaris.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build solaris
+//go:build solaris
 
 package fileutil
 
@@ -45,7 +45,7 @@ func (l *unixLock) set(lock bool) error {
 }
 
 func newLock(fileName string) (Releaser, error) {
-	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
+	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_unix.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_unix.go
index f6f78d367d87cdef972f302be8ea0ee4072b3106..25de0ffb22adb133a7696eeb5433dbe885ff8951 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_unix.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_unix.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build darwin dragonfly freebsd linux netbsd openbsd
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
 
 package fileutil
 
@@ -40,7 +40,7 @@ func (l *unixLock) set(lock bool) error {
 }
 
 func newLock(fileName string) (Releaser, error) {
-	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
+	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go
index 4dbca4f9740f9b5ec8dd0dbc65fcfa151ee9ad8e..782ff27ec91d9d5388fdb39f48c2951b877f3444 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go
@@ -14,9 +14,8 @@
 package fileutil
 
 import (
+	"fmt"
 	"os"
-
-	"github.com/pkg/errors"
 )
 
 type MmapFile struct {
@@ -31,7 +30,7 @@ func OpenMmapFile(path string) (*MmapFile, error) {
 func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) {
 	f, err := os.Open(path)
 	if err != nil {
-		return nil, errors.Wrap(err, "try lock file")
+		return nil, fmt.Errorf("try lock file: %w", err)
 	}
 	defer func() {
 		if retErr != nil {
@@ -41,14 +40,14 @@ func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) {
 	if size <= 0 {
 		info, err := f.Stat()
 		if err != nil {
-			return nil, errors.Wrap(err, "stat")
+			return nil, fmt.Errorf("stat: %w", err)
 		}
 		size = int(info.Size())
 	}
 
 	b, err := mmap(f, size)
 	if err != nil {
-		return nil, errors.Wrapf(err, "mmap, size %d", size)
+		return nil, fmt.Errorf("mmap, size %d: %w", size, err)
 	}
 
 	return &MmapFile{f: f, b: b}, nil
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_386.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_386.go
index 66b9d36803441d25ccb56d504e2cf34197be13f2..85c0cce09664e50470fc13f852dfd84652184d49 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_386.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_386.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build windows
+//go:build windows
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_amd64.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_amd64.go
index 4b523bc67c2e930e545f039d6b81e80061371cbd..71fc568bd5f2bd85cc48467dfd9f2be8f7fe6f40 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_amd64.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_amd64.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build windows
+//go:build windows
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_arm64.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_arm64.go
new file mode 100644
index 0000000000000000000000000000000000000000..71fc568bd5f2bd85cc48467dfd9f2be8f7fe6f40
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_arm64.go
@@ -0,0 +1,18 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package fileutil
+
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_js.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_js.go
new file mode 100644
index 0000000000000000000000000000000000000000..f29106fc1e098dbb32dc7bc65651d7c1f0ea2318
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_js.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+)
+
+func mmap(f *os.File, length int) ([]byte, error) {
+	return nil, errors.New("unsupported")
+}
+
+func munmap(b []byte) (err error) {
+	return errors.New("unsupported")
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_unix.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_unix.go
index 043f4d408cc3abcdd372c728e3932c6c3bc8fc11..3d15e1a8c12cd9cc5b5d773e11dfcc6db36e7e77 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_unix.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap_unix.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build !windows,!plan9
+//go:build !windows && !plan9 && !js
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go
index ada0462213e3f2e3cd726f70b9414ea3bfd2f3f1..026c69b354ad2616f84832d91c2f7b3e7abb480f 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go
@@ -15,6 +15,7 @@
 package fileutil
 
 import (
+	"errors"
 	"os"
 	"syscall"
 )
@@ -23,10 +24,10 @@ func preallocExtend(f *os.File, sizeInBytes int64) error {
 	// use mode = 0 to change size
 	err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
 	if err != nil {
-		errno, ok := err.(syscall.Errno)
+		var errno syscall.Errno
 		// not supported; fallback
 		// fallocate EINTRs frequently in some environments; fallback
-		if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
+		if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
 			return preallocExtendTrunc(f, sizeInBytes)
 		}
 	}
@@ -37,9 +38,9 @@ func preallocFixed(f *os.File, sizeInBytes int64) error {
 	// use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
 	err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
 	if err != nil {
-		errno, ok := err.(syscall.Errno)
+		var errno syscall.Errno
 		// treat not supported as nil error
-		if ok && errno == syscall.ENOTSUP {
+		if errors.As(err, &errno) && errno == syscall.ENOTSUP {
 			return nil
 		}
 	}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_other.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_other.go
index 162fbc5f7826c72e792306d0b0def0752b9a6bae..e7fd937a436963f22b59480a7a86dbb14fa19a6d 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_other.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_other.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build !linux,!darwin
+//go:build !linux && !darwin
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync.go
index 2e64a40880a7f44cb28eaad93ede4a0a1bafcd56..e1a4a7fd3db9fb0fc3d67a0bda0f8bcb627fa14c 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build !linux,!darwin
+//go:build !linux && !darwin
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_darwin.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_darwin.go
index 2af1b0f411933d70c72f5af1028106a94248e976..d698b896af876eff09cd3dac99f829a77c180621 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_darwin.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_darwin.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build darwin
+//go:build darwin
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_linux.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_linux.go
index 8b4fc8268e5c8463d05c420007786b04fb93c6a6..2b4c620bb0b94535efc9fddc06668c7c2fe48a70 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_linux.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/sync_linux.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux
+//go:build linux
 
 package fileutil
 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go
deleted file mode 100644
index a24d5047293511af69657bbae3dc9d85130d2ad2..0000000000000000000000000000000000000000
--- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tsdbutil
-
-import (
-	"math"
-
-	"github.com/prometheus/prometheus/tsdb/chunkenc"
-)
-
-// BufferedSeriesIterator wraps an iterator with a look-back buffer.
-type BufferedSeriesIterator struct {
-	it  chunkenc.Iterator
-	buf *sampleRing
-
-	lastTime int64
-}
-
-// NewBuffer returns a new iterator that buffers the values within the time range
-// of the current element and the duration of delta before.
-func NewBuffer(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
-	return &BufferedSeriesIterator{
-		it:       it,
-		buf:      newSampleRing(delta, 16),
-		lastTime: math.MinInt64,
-	}
-}
-
-// PeekBack returns the previous element of the iterator. If there is none buffered,
-// ok is false.
-func (b *BufferedSeriesIterator) PeekBack() (t int64, v float64, ok bool) {
-	return b.buf.last()
-}
-
-// Buffer returns an iterator over the buffered data.
-func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator {
-	return b.buf.iterator()
-}
-
-// Seek advances the iterator to the element at time t or greater.
-func (b *BufferedSeriesIterator) Seek(t int64) bool {
-	t0 := t - b.buf.delta
-
-	// If the delta would cause us to seek backwards, preserve the buffer
-	// and just continue regular advancement while filling the buffer on the way.
-	if t0 > b.lastTime {
-		b.buf.reset()
-
-		ok := b.it.Seek(t0)
-		if !ok {
-			return false
-		}
-		b.lastTime, _ = b.At()
-	}
-
-	if b.lastTime >= t {
-		return true
-	}
-	for b.Next() {
-		if b.lastTime >= t {
-			return true
-		}
-	}
-
-	return false
-}
-
-// Next advances the iterator to the next element.
-func (b *BufferedSeriesIterator) Next() bool {
-	// Add current element to buffer before advancing.
-	b.buf.add(b.it.At())
-
-	ok := b.it.Next()
-	if ok {
-		b.lastTime, _ = b.At()
-	}
-	return ok
-}
-
-// At returns the current element of the iterator.
-func (b *BufferedSeriesIterator) At() (int64, float64) {
-	return b.it.At()
-}
-
-// Err returns the last encountered error.
-func (b *BufferedSeriesIterator) Err() error {
-	return b.it.Err()
-}
-
-type sample struct {
-	t int64
-	v float64
-}
-
-func (s sample) T() int64 {
-	return s.t
-}
-
-func (s sample) V() float64 {
-	return s.v
-}
-
-type sampleRing struct {
-	delta int64
-
-	buf []sample // lookback buffer
-	i   int      // position of most recent element in ring buffer
-	f   int      // position of first element in ring buffer
-	l   int      // number of elements in buffer
-}
-
-func newSampleRing(delta int64, sz int) *sampleRing {
-	r := &sampleRing{delta: delta, buf: make([]sample, sz)}
-	r.reset()
-
-	return r
-}
-
-func (r *sampleRing) reset() {
-	r.l = 0
-	r.i = -1
-	r.f = 0
-}
-
-func (r *sampleRing) iterator() chunkenc.Iterator {
-	return &sampleRingIterator{r: r, i: -1}
-}
-
-type sampleRingIterator struct {
-	r *sampleRing
-	i int
-}
-
-func (it *sampleRingIterator) Next() bool {
-	it.i++
-	return it.i < it.r.l
-}
-
-func (it *sampleRingIterator) Seek(int64) bool {
-	return false
-}
-
-func (it *sampleRingIterator) Err() error {
-	return nil
-}
-
-func (it *sampleRingIterator) At() (int64, float64) {
-	return it.r.at(it.i)
-}
-
-func (r *sampleRing) at(i int) (int64, float64) {
-	j := (r.f + i) % len(r.buf)
-	s := r.buf[j]
-	return s.t, s.v
-}
-
-// add adds a sample to the ring buffer and frees all samples that fall
-// out of the delta range.
-func (r *sampleRing) add(t int64, v float64) {
-	l := len(r.buf)
-	// Grow the ring buffer if it fits no more elements.
-	if l == r.l {
-		buf := make([]sample, 2*l)
-		copy(buf[l+r.f:], r.buf[r.f:])
-		copy(buf, r.buf[:r.f])
-
-		r.buf = buf
-		r.i = r.f
-		r.f += l
-	} else {
-		r.i++
-		if r.i >= l {
-			r.i -= l
-		}
-	}
-
-	r.buf[r.i] = sample{t: t, v: v}
-	r.l++
-
-	// Free head of the buffer of samples that just fell out of the range.
-	for r.buf[r.f].t < t-r.delta {
-		r.f++
-		if r.f >= l {
-			r.f -= l
-		}
-		r.l--
-	}
-}
-
-// last returns the most recent element added to the ring.
-func (r *sampleRing) last() (int64, float64, bool) {
-	if r.l == 0 {
-		return 0, 0, false
-	}
-	s := r.buf[r.i]
-	return s.t, s.v, true
-}
-
-func (r *sampleRing) samples() []sample {
-	res := make([]sample, r.l)
-
-	var k = r.f + r.l
-	var j int
-	if k > len(r.buf) {
-		k = len(r.buf)
-		j = r.l - k + r.f
-	}
-
-	n := copy(res, r.buf[r.f:k])
-	copy(res[n:], r.buf[:j])
-
-	return res
-}
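
The removed BufferedSeriesIterator kept a delta-sized lookback window: every Next pushed the current sample into the ring, and add then evicted everything older than t - delta from the head. A minimal self-contained sketch of that eviction rule, using a plain slice and hypothetical names instead of the ring-index arithmetic:

```go
package main

import "fmt"

// sample mirrors the (t, v) pairs the removed sampleRing stored.
type sample struct {
	t int64
	v float64
}

// lookback keeps only samples with t >= newest-delta, like the deleted
// sampleRing.add, but on a plain slice for clarity.
type lookback struct {
	delta int64
	buf   []sample
}

func (l *lookback) add(t int64, v float64) {
	l.buf = append(l.buf, sample{t: t, v: v})
	// Evict head samples that fell out of the [t-delta, t] window.
	i := 0
	for i < len(l.buf) && l.buf[i].t < t-l.delta {
		i++
	}
	l.buf = l.buf[i:]
}

func main() {
	l := &lookback{delta: 5}
	for t := int64(0); t <= 10; t++ {
		l.add(t, float64(t))
	}
	fmt.Println(l.buf) // only samples with t in [5, 10] remain
}
```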
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go
deleted file mode 100644
index 47760453e346d0381855a1b3236d85d96ae87b26..0000000000000000000000000000000000000000
--- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tsdbutil
-
-import (
-	"github.com/prometheus/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/prometheus/tsdb/chunks"
-)
-
-type Samples interface {
-	Get(i int) Sample
-	Len() int
-}
-
-type Sample interface {
-	T() int64
-	V() float64
-}
-
-type SampleSlice []Sample
-
-func (s SampleSlice) Get(i int) Sample { return s[i] }
-func (s SampleSlice) Len() int         { return len(s) }
-
-func ChunkFromSamples(s []Sample) chunks.Meta {
-	return ChunkFromSamplesGeneric(SampleSlice(s))
-}
-
-func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
-	mint, maxt := int64(0), int64(0)
-
-	if s.Len() > 0 {
-		mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T()
-	}
-
-	c := chunkenc.NewXORChunk()
-	ca, _ := c.Appender()
-
-	for i := 0; i < s.Len(); i++ {
-		ca.Append(s.Get(i).T(), s.Get(i).V())
-	}
-	return chunks.Meta{
-		MinTime: mint,
-		MaxTime: maxt,
-		Chunk:   c,
-	}
-}
-
-// PopulatedChunk creates a chunk populated with samples every second starting at minTime
-func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
-	samples := make([]Sample, numSamples)
-	for i := 0; i < numSamples; i++ {
-		samples[i] = sample{minTime + int64(i*1000), 1.0}
-	}
-	return ChunkFromSamples(samples)
-}
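
The deleted ChunkFromSamples was a thin wrapper over the chunkenc appender, as the body above shows. A short sketch of the same steps against the chunkenc API it used, with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// The removed helper reduced to: make an XOR chunk, grab its
	// appender, and append each (t, v) pair in timestamp order.
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}
	for i := int64(0); i < 5; i++ {
		app.Append(i*1000, float64(i)) // one sample per second
	}
	fmt.Println(c.NumSamples()) // 5
}
```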
diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go
new file mode 100644
index 0000000000000000000000000000000000000000..95783957a7efaa57575ba1c2e65372d16312fdd8
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go
@@ -0,0 +1,315 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package annotations
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/promql/parser/posrange"
+)
+
+// Annotations is a general wrapper for warnings and other information
+// that is returned by the query API along with the results.
+// Each individual annotation is modeled by a Go error.
+// They are deduplicated based on the string returned by error.Error().
+// The zero value is usable without further initialization, see New().
+type Annotations map[string]error
+
+// New returns new Annotations ready to use. Note that the zero value of
+// Annotations is also fully usable, but using this method is often more
+// readable.
+func New() *Annotations {
+	return &Annotations{}
+}
+
+// Add adds an annotation (modeled as a Go error) in-place and returns the
+// modified Annotations for convenience.
+func (a *Annotations) Add(err error) Annotations {
+	if *a == nil {
+		*a = Annotations{}
+	}
+	(*a)[err.Error()] = err
+	return *a
+}
+
+// Merge adds the contents of the second annotation to the first, modifying
+// the first in-place, and returns the merged first Annotation for convenience.
+func (a *Annotations) Merge(aa Annotations) Annotations {
+	if *a == nil {
+		if aa == nil {
+			return nil
+		}
+		*a = Annotations{}
+	}
+	for key, val := range aa {
+		(*a)[key] = val
+	}
+	return *a
+}
+
+// AsErrors is a convenience function to return the annotations map as a slice
+// of errors.
+func (a Annotations) AsErrors() []error {
+	arr := make([]error, 0, len(a))
+	for _, err := range a {
+		arr = append(arr, err)
+	}
+	return arr
+}
+
+// AsStrings is a convenience function to return the annotations map as 2 slices
+// of strings, separated into warnings and infos. The query string is used to get the
+// line number and character offset positioning info of the elements which trigger an
+// annotation. We limit the number of warnings and infos returned here with maxWarnings
+// and maxInfos respectively (0 for no limit).
+func (a Annotations) AsStrings(query string, maxWarnings, maxInfos int) (warnings, infos []string) {
+	warnings = make([]string, 0, maxWarnings+1)
+	infos = make([]string, 0, maxInfos+1)
+	warnSkipped := 0
+	infoSkipped := 0
+	for _, err := range a {
+		var anErr annoErr
+		if errors.As(err, &anErr) {
+			anErr.Query = query
+			err = anErr
+		}
+		switch {
+		case errors.Is(err, PromQLInfo):
+			if maxInfos == 0 || len(infos) < maxInfos {
+				infos = append(infos, err.Error())
+			} else {
+				infoSkipped++
+			}
+		default:
+			if maxWarnings == 0 || len(warnings) < maxWarnings {
+				warnings = append(warnings, err.Error())
+			} else {
+				warnSkipped++
+			}
+		}
+	}
+	if warnSkipped > 0 {
+		warnings = append(warnings, fmt.Sprintf("%d more warning annotations omitted", warnSkipped))
+	}
+	if infoSkipped > 0 {
+		infos = append(infos, fmt.Sprintf("%d more info annotations omitted", infoSkipped))
+	}
+	return
+}
+
+// CountWarningsAndInfo counts and returns the number of warnings and infos in the
+// annotations wrapper.
+func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) {
+	for _, err := range a {
+		if errors.Is(err, PromQLWarning) {
+			countWarnings++
+		}
+		if errors.Is(err, PromQLInfo) {
+			countInfo++
+		}
+	}
+	return
+}
+
+//nolint:revive // error-naming.
+var (
+	// Currently there are only 2 types, warnings and infos.
+	// For now, infos are visually identical to warnings as we have not updated
+	// the API spec or the frontend to show a different kind of warning. But we
+	// make the distinction here to prepare for adding them in the future.
+	PromQLInfo    = errors.New("PromQL info")
+	PromQLWarning = errors.New("PromQL warning")
+
+	InvalidRatioWarning                        = fmt.Errorf("%w: ratio value should be between -1 and 1", PromQLWarning)
+	InvalidQuantileWarning                     = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
+	BadBucketLabelWarning                      = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
+	MixedFloatsHistogramsWarning               = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)
+	MixedClassicNativeHistogramsWarning        = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
+	NativeHistogramNotCounterWarning           = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning)
+	NativeHistogramNotGaugeWarning             = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning)
+	MixedExponentialCustomHistogramsWarning    = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning)
+	IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning)
+	IncompatibleBucketLayoutInBinOpWarning     = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning)
+
+	PossibleNonCounterInfo                  = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo)
+	HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo)
+	IncompatibleTypesInBinOpInfo            = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo)
+	HistogramIgnoredInAggregationInfo       = fmt.Errorf("%w: ignored histogram in", PromQLInfo)
+	HistogramIgnoredInMixedRangeInfo        = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo)
+)
+
+type annoErr struct {
+	PositionRange posrange.PositionRange
+	Err           error
+	Query         string
+}
+
+func (e annoErr) Error() string {
+	if e.Query == "" {
+		return e.Err.Error()
+	}
+	return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0))
+}
+
+func (e annoErr) Unwrap() error {
+	return e.Err
+}
+
+// NewInvalidQuantileWarning is used when the user specifies an invalid quantile
+// value, i.e. a float that is outside the range [0, 1] or NaN.
+func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w, got %g", InvalidQuantileWarning, q),
+	}
+}
+
+// NewInvalidRatioWarning is used when the user specifies an invalid ratio
+// value, i.e. a float that is outside the range [-1, 1] or NaN.
+func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w, got %g, capping to %g", InvalidRatioWarning, q, to),
+	}
+}
+
+// NewBadBucketLabelWarning is used when there is an error parsing the bucket label
+// of a classic histogram.
+func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w of %q for metric name %q", BadBucketLabelWarning, label, metricName),
+	}
+}
+
+// NewMixedFloatsHistogramsWarning is used when the queried series includes both
+// float samples and histogram samples for functions that do not support mixed
+// samples.
+func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w metric name %q", MixedFloatsHistogramsWarning, metricName),
+	}
+}
+
+// NewMixedFloatsHistogramsAggWarning is used when the queried series includes both
+// float samples and histogram samples in an aggregation.
+func NewMixedFloatsHistogramsAggWarning(pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w aggregation", MixedFloatsHistogramsWarning),
+	}
+}
+
+// NewMixedClassicNativeHistogramsWarning is used when the queried series includes
+// both classic and native histograms.
+func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", MixedClassicNativeHistogramsWarning, metricName),
+	}
+}
+
+// NewNativeHistogramNotCounterWarning is used when histogramRate is called
+// with isCounter set to true on a gauge histogram.
+func NewNativeHistogramNotCounterWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", NativeHistogramNotCounterWarning, metricName),
+	}
+}
+
+// NewNativeHistogramNotGaugeWarning is used when histogramRate is called
+// with isCounter set to false on a counter histogram.
+func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", NativeHistogramNotGaugeWarning, metricName),
+	}
+}
+
+// NewMixedExponentialCustomHistogramsWarning is used when the queried series includes
+// histograms with both exponential and custom buckets schemas.
+func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName),
+	}
+}
+
+// NewIncompatibleCustomBucketsHistogramsWarning is used when the queried series includes
+// custom buckets histograms with incompatible custom bounds.
+func NewIncompatibleCustomBucketsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", IncompatibleCustomBucketsHistogramsWarning, metricName),
+	}
+}
+
+// NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
+// have the suffixes _total, _sum, _count, or _bucket.
+func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName),
+	}
+}
+
+// NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to
+// histogram_quantile needs to be forced to be monotonic.
+func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName),
+	}
+}
+
+// NewIncompatibleTypesInBinOpInfo is used if binary operators act on a
+// combination of types that is not supported and therefore return no result.
+func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType),
+	}
+}
+
+// NewHistogramIgnoredInAggregationInfo is used when a histogram is ignored by
+// an aggregation operator that cannot handle histograms.
+func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation),
+	}
+}
+
+// NewHistogramIgnoredInMixedRangeInfo is used when a histogram is ignored
+// in a range vector that contains a mix of floats and histograms.
+func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName),
+	}
+}
+
+// NewIncompatibleBucketLayoutInBinOpWarning is used if binary operators act on a
+// combination of two incompatible histograms.
+func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %s", IncompatibleBucketLayoutInBinOpWarning, operator),
+	}
+}
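
A usage sketch for the new package, assuming only the API added above; the query text and position are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	var annos annotations.Annotations // the zero value is ready to use

	// Hypothetical query and position, purely for illustration.
	query := `histogram_quantile(1.5, up)`
	pos := posrange.PositionRange{Start: 0, End: posrange.Pos(len(query))}

	annos.Add(annotations.NewInvalidQuantileWarning(1.5, pos))
	annos.Add(annotations.NewPossibleNonCounterInfo("up", pos))

	warnings, infos := annos.AsStrings(query, 0, 0) // 0 means no limit
	fmt.Println("warnings:", warnings)
	fmt.Println("infos:", infos)
}
```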
diff --git a/vendor/github.com/prometheus/prometheus/util/strutil/quote.go b/vendor/github.com/prometheus/prometheus/util/strutil/quote.go
index 981ad473d20d4cb21a352d0cd9d4b5e5bbde4f69..95dcb6f694f6f3a1c34c738cc248aeb95f2112dd 100644
--- a/vendor/github.com/prometheus/prometheus/util/strutil/quote.go
+++ b/vendor/github.com/prometheus/prometheus/util/strutil/quote.go
@@ -10,6 +10,45 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+//
+// NOTE: The functions in this file (Unquote, unquoteChar, contains, unhex)
+// have been adapted from the "strconv" package of the Go standard library
+// to work for Prometheus-style strings. Go's special-casing for single
+// quotes was removed and single quoted strings are now treated the same as
+// double-quoted ones.
+//
+// The original copyright notice from the Go project for these parts is
+// reproduced here:
+//
+// ========================================================================
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// ========================================================================
 
 package strutil
 
@@ -24,13 +63,6 @@ var ErrSyntax = errors.New("invalid syntax")
 // Unquote interprets s as a single-quoted, double-quoted, or backquoted
 // Prometheus query language string literal, returning the string value that s
 // quotes.
-//
-// NOTE: This function as well as the necessary helper functions below
-// (unquoteChar, contains, unhex) and associated tests have been adapted from
-// the corresponding functions in the "strconv" package of the Go standard
-// library to work for Prometheus-style strings. Go's special-casing for single
-// quotes was removed and single quoted strings are now treated the same as
-// double quoted ones.
 func Unquote(s string) (t string, err error) {
 	n := len(s)
 	if n < 2 {
@@ -82,10 +114,10 @@ func Unquote(s string) (t string, err error) {
 // or character literal represented by the string s.
 // It returns four values:
 //
-//	1) value, the decoded Unicode code point or byte value;
-//	2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
-//	3) tail, the remainder of the string after the character; and
-//	4) an error that will be nil if the character is syntactically valid.
+//  1. value, the decoded Unicode code point or byte value;
+//  2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
+//  3. tail, the remainder of the string after the character; and
+//  4. an error that will be nil if the character is syntactically valid.
 //
 // The second argument, quote, specifies the type of literal being parsed
 // and therefore which escaped quote character is permitted.
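
The practical effect of the adapted Unquote is that single-quoted PromQL strings process escape sequences exactly like double-quoted ones. A small illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/util/strutil"
)

func main() {
	// Unlike Go, single quotes delimit ordinary strings here, so escape
	// sequences inside them are interpreted as in double quotes.
	s, err := strutil.Unquote(`'foo\nbar'`)
	fmt.Printf("%q %v\n", s, err) // "foo\nbar" <nil>
}
```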
diff --git a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go
index 3d96e4faf6365e4fd7be9bcb084fdc30b7fa319f..8cdd7d48302c9c5dd7b4c59bf57b94ad5d78e8ed 100644
--- a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go
+++ b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go
@@ -16,13 +16,13 @@ package strutil
 import (
 	"fmt"
 	"net/url"
-	"regexp"
-)
+	"strings"
 
-var (
-	invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
+	"github.com/grafana/regexp"
 )
 
+var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
+
 // TableLinkForExpression creates an escaped relative link to the table view of
 // the provided expression.
 func TableLinkForExpression(expr string) string {
@@ -39,6 +39,26 @@ func GraphLinkForExpression(expr string) string {
 
 // SanitizeLabelName replaces anything that doesn't match
 // client_label.LabelNameRE with an underscore.
+// Note: this does not handle all Prometheus label name restrictions (such as
+// not starting with a digit 0-9), and hence should only be used if the label
+// name is prefixed with a known valid string.
 func SanitizeLabelName(name string) string {
 	return invalidLabelCharRE.ReplaceAllString(name, "_")
 }
+
+// SanitizeFullLabelName replaces any invalid character with an underscore, and
+// if given an empty string, returns a string containing a single underscore.
+func SanitizeFullLabelName(name string) string {
+	if len(name) == 0 {
+		return "_"
+	}
+	var validSb strings.Builder
+	for i, b := range name {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			validSb.WriteRune('_')
+		} else {
+			validSb.WriteRune(b)
+		}
+	}
+	return validSb.String()
+}
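
A sketch contrasting the two sanitizers on made-up inputs: SanitizeLabelName only swaps invalid characters, while the new SanitizeFullLabelName also rejects a leading digit and the empty string:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/util/strutil"
)

func main() {
	fmt.Println(strutil.SanitizeLabelName("0foo.bar"))     // 0foo_bar (leading digit kept)
	fmt.Println(strutil.SanitizeFullLabelName("0foo.bar")) // _foo_bar
	fmt.Println(strutil.SanitizeFullLabelName(""))         // _
}
```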
diff --git a/vendor/github.com/safchain/ethtool/.golangci.yml b/vendor/github.com/safchain/ethtool/.golangci.yml
index 77ccf927e231f32e9bbc89eb8a347beb050c0d65..65552c98af29ebbb7055a9cdced9cfabd9d6b353 100644
--- a/vendor/github.com/safchain/ethtool/.golangci.yml
+++ b/vendor/github.com/safchain/ethtool/.golangci.yml
@@ -1,11 +1,14 @@
 linters:
-  disable:
-    - gosimple
-    - unused
   enable:
+    - gosimple
     - gci
     - gofmt
     - misspell
+    - goimports
+    - staticcheck
+    - errcheck
+    - govet
+    - gocritic
 linters-settings:
   gci:
     sections:
diff --git a/vendor/github.com/safchain/ethtool/LICENSE b/vendor/github.com/safchain/ethtool/LICENSE
index 8f71f43fee3f78649d238238cbde51e6d7055c82..3c83e6b88414cc8b2d6917b24d0f68bd22a5ad5b 100644
--- a/vendor/github.com/safchain/ethtool/LICENSE
+++ b/vendor/github.com/safchain/ethtool/LICENSE
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright {yyyy} {name of copyright owner}
+   Copyright (c) 2015 The Ethtool Authors
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/safchain/ethtool/Makefile b/vendor/github.com/safchain/ethtool/Makefile
index 67d2da395f94ce7b436331d6f71b628faba61e18..beb5ca2c088997e9688cd5dbf72c0b26e72a56dc 100644
--- a/vendor/github.com/safchain/ethtool/Makefile
+++ b/vendor/github.com/safchain/ethtool/Makefile
@@ -2,3 +2,4 @@ all: build
 
 build:
 	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build
+	CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build
diff --git a/vendor/github.com/safchain/ethtool/ethtool.go b/vendor/github.com/safchain/ethtool/ethtool.go
index 42fc3452046db08d598319417e9057fb5078a38d..62df2c10b1c0cfe851f35f054ef46c88ffc2cc2a 100644
--- a/vendor/github.com/safchain/ethtool/ethtool.go
+++ b/vendor/github.com/safchain/ethtool/ethtool.go
@@ -55,14 +55,17 @@ const (
 	// CMD supported
 	ETHTOOL_GSET     = 0x00000001 /* Get settings. */
 	ETHTOOL_SSET     = 0x00000002 /* Set settings. */
+	ETHTOOL_GWOL     = 0x00000005 /* Get wake-on-lan options. */
+	ETHTOOL_SWOL     = 0x00000006 /* Set wake-on-lan options. */
 	ETHTOOL_GDRVINFO = 0x00000003 /* Get driver info. */
 	ETHTOOL_GMSGLVL  = 0x00000007 /* Get driver message level */
 	ETHTOOL_SMSGLVL  = 0x00000008 /* Set driver msg level. */
 
-	/* Get link status for host, i.e. whether the interface *and* the
-	* physical port (if there is one) are up (ethtool_value). */
+	// Get link status for host, i.e. whether the interface *and* the
+	// physical port (if there is one) are up (ethtool_value).
 	ETHTOOL_GLINK         = 0x0000000a
 	ETHTOOL_GCOALESCE     = 0x0000000e /* Get coalesce config */
+	ETHTOOL_SCOALESCE     = 0x0000000f /* Set coalesce config */
 	ETHTOOL_GRINGPARAM    = 0x00000010 /* Get ring parameters */
 	ETHTOOL_SRINGPARAM    = 0x00000011 /* Set ring parameters. */
 	ETHTOOL_GPAUSEPARAM   = 0x00000012 /* Get pause parameters */
@@ -97,36 +100,6 @@ const (
 	MAX_SSET_INFO = 64
 )
 
-var supportedCapabilities = []struct {
-	name  string
-	mask  uint64
-	speed uint64
-}{
-	{"10baseT_Half", unix.ETHTOOL_LINK_MODE_10baseT_Half_BIT, 10_000_000},
-	{"10baseT_Full", unix.ETHTOOL_LINK_MODE_10baseT_Full_BIT, 10_000_000},
-	{"100baseT_Half", unix.ETHTOOL_LINK_MODE_100baseT_Half_BIT, 100_000_000},
-	{"100baseT_Full", unix.ETHTOOL_LINK_MODE_100baseT_Full_BIT, 100_000_000},
-	{"1000baseT_Half", unix.ETHTOOL_LINK_MODE_1000baseT_Half_BIT, 1_000_000_000},
-	{"1000baseT_Full", unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1_000_000_000},
-	{"10000baseT_Full", unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 10_000_000_000},
-	{"2500baseT_Full", unix.ETHTOOL_LINK_MODE_2500baseT_Full_BIT, 2_500_000_000},
-	{"1000baseKX_Full", unix.ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 1_000_000_000},
-	{"10000baseKX_Full", unix.ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 10_000_000_000},
-	{"10000baseKR_Full", unix.ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 10_000_000_000},
-	{"10000baseR_FEC", unix.ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 10_000_000_000},
-	{"20000baseMLD2_Full", unix.ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, 20_000_000_000},
-	{"20000baseKR2_Full", unix.ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 20_000_000_000},
-	{"40000baseKR4_Full", unix.ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 40_000_000_000},
-	{"40000baseCR4_Full", unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 40_000_000_000},
-	{"40000baseSR4_Full", unix.ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 40_000_000_000},
-	{"40000baseLR4_Full", unix.ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 40_000_000_000},
-	{"56000baseKR4_Full", unix.ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 56_000_000_000},
-	{"56000baseCR4_Full", unix.ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 56_000_000_000},
-	{"56000baseSR4_Full", unix.ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 56_000_000_000},
-	{"56000baseLR4_Full", unix.ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 56_000_000_000},
-	{"25000baseCR_Full", unix.ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 25_000_000_000},
-}
-
 type ifreq struct {
 	ifr_name [IFNAMSIZ]byte
 	ifr_data uintptr
@@ -236,23 +209,55 @@ type Coalesce struct {
 	RateSampleInterval       uint32
 }
 
+// WoL options
 const (
-	SOF_TIMESTAMPING_TX_HARDWARE  = (1 << 0)
-	SOF_TIMESTAMPING_TX_SOFTWARE  = (1 << 1)
-	SOF_TIMESTAMPING_RX_HARDWARE  = (1 << 2)
-	SOF_TIMESTAMPING_RX_SOFTWARE  = (1 << 3)
-	SOF_TIMESTAMPING_SOFTWARE     = (1 << 4)
-	SOF_TIMESTAMPING_SYS_HARDWARE = (1 << 5)
-	SOF_TIMESTAMPING_RAW_HARDWARE = (1 << 6)
-	SOF_TIMESTAMPING_OPT_ID       = (1 << 7)
-	SOF_TIMESTAMPING_TX_SCHED     = (1 << 8)
-	SOF_TIMESTAMPING_TX_ACK       = (1 << 9)
-	SOF_TIMESTAMPING_OPT_CMSG     = (1 << 10)
-	SOF_TIMESTAMPING_OPT_TSONLY   = (1 << 11)
-	SOF_TIMESTAMPING_OPT_STATS    = (1 << 12)
-	SOF_TIMESTAMPING_OPT_PKTINFO  = (1 << 13)
-	SOF_TIMESTAMPING_OPT_TX_SWHW  = (1 << 14)
-	SOF_TIMESTAMPING_BIND_PHC     = (1 << 15)
+	WAKE_PHY         = 1 << 0
+	WAKE_UCAST       = 1 << 1
+	WAKE_MCAST       = 1 << 2
+	WAKE_BCAST       = 1 << 3
+	WAKE_ARP         = 1 << 4
+	WAKE_MAGIC       = 1 << 5
+	WAKE_MAGICSECURE = 1 << 6 // only meaningful if WAKE_MAGIC
+)
+
+var WoLMap = map[uint32]string{
+	WAKE_PHY:         "p", // Wake on PHY activity
+	WAKE_UCAST:       "u", // Wake on unicast messages
+	WAKE_MCAST:       "m", // Wake on multicast messages
+	WAKE_BCAST:       "b", // Wake on broadcast messages
+	WAKE_ARP:         "a", // Wake on ARP
+	WAKE_MAGIC:       "g", // Wake on MagicPacket™
+	WAKE_MAGICSECURE: "s", // Enable SecureOn™ password for MagicPacket™
+	// f Wake on filter(s)
+	// d Disable (wake on nothing). This option clears all previous options.
+}
+
+// WakeOnLan contains WoL config for an interface
+type WakeOnLan struct {
+	Cmd       uint32 // ETHTOOL_GWOL or ETHTOOL_SWOL
+	Supported uint32 // r/o bitmask of WAKE_* flags for supported WoL modes
+	Opts      uint32 // Bitmask of WAKE_* flags for enabled WoL modes
+}
+
+// Timestamping options
+// see: https://www.kernel.org/doc/Documentation/networking/timestamping.txt
+const (
+	SOF_TIMESTAMPING_TX_HARDWARE  = (1 << 0)  /* Request tx timestamps generated by the network adapter. */
+	SOF_TIMESTAMPING_TX_SOFTWARE  = (1 << 1)  /* Request tx timestamps when data leaves the kernel. */
+	SOF_TIMESTAMPING_RX_HARDWARE  = (1 << 2)  /* Request rx timestamps generated by the network adapter. */
+	SOF_TIMESTAMPING_RX_SOFTWARE  = (1 << 3)  /* Request rx timestamps when data enters the kernel. */
+	SOF_TIMESTAMPING_SOFTWARE     = (1 << 4)  /* Report any software timestamps when available. */
+	SOF_TIMESTAMPING_SYS_HARDWARE = (1 << 5)  /* This option is deprecated and ignored. */
+	SOF_TIMESTAMPING_RAW_HARDWARE = (1 << 6)  /* Report hardware timestamps. */
+	SOF_TIMESTAMPING_OPT_ID       = (1 << 7)  /* Generate a unique identifier along with each packet. */
+	SOF_TIMESTAMPING_TX_SCHED     = (1 << 8)  /* Request tx timestamps prior to entering the packet scheduler. */
+	SOF_TIMESTAMPING_TX_ACK       = (1 << 9)  /* Request tx timestamps when all data in the send buffer has been acknowledged. */
+	SOF_TIMESTAMPING_OPT_CMSG     = (1 << 10) /* Support recv() cmsg for all timestamped packets. */
+	SOF_TIMESTAMPING_OPT_TSONLY   = (1 << 11) /* Applies to transmit timestamps only. */
+	SOF_TIMESTAMPING_OPT_STATS    = (1 << 12) /* Optional stats that are obtained along with the transmit timestamps. */
+	SOF_TIMESTAMPING_OPT_PKTINFO  = (1 << 13) /* Enable the SCM_TIMESTAMPING_PKTINFO control message for incoming packets with hardware timestamps. */
+	SOF_TIMESTAMPING_OPT_TX_SWHW  = (1 << 14) /* Request both hardware and software timestamps for outgoing packets when SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_TX_SOFTWARE are enabled at the same time. */
+	SOF_TIMESTAMPING_BIND_PHC     = (1 << 15) /* Bind the socket to a specific PTP Hardware Clock. */
 )
 
 const (
@@ -308,6 +313,7 @@ const (
 	HWTSTAMP_FILTER_NTP_ALL                    /* NTP, UDP, all versions and packet modes */
 )
 
+// TimestampingInformation contains PTP timestamping information.
 type TimestampingInformation struct {
 	Cmd            uint32
 	SoTimestamping uint32 /* SOF_TIMESTAMPING_* bitmask */
@@ -378,6 +384,7 @@ type Pause struct {
 	TxPause uint32
 }
 
+// Ethtool is a struct that contains the file descriptor for the socket used to issue ethtool ioctls.
 type Ethtool struct {
 	fd int
 }
@@ -386,7 +393,7 @@ type Ethtool struct {
 func goString(s []byte) string {
 	strEnd := bytes.IndexByte(s, 0)
 	if strEnd == -1 {
-		return string(s[:])
+		return string(s)
 	}
 	return string(s[:strEnd])
 }
@@ -484,6 +491,15 @@ func (e *Ethtool) GetCoalesce(intf string) (Coalesce, error) {
 	return coalesce, nil
 }
 
+// SetCoalesce sets the coalesce config for the given interface name.
+func (e *Ethtool) SetCoalesce(intf string, coalesce Coalesce) (Coalesce, error) {
+	coalesce, err := e.setCoalesce(intf, coalesce)
+	if err != nil {
+		return Coalesce{}, err
+	}
+	return coalesce, nil
+}
+
 // GetTimestampingInformation returns the PTP timestamping information for the given interface name.
 func (e *Ethtool) GetTimestampingInformation(intf string) (TimestampingInformation, error) {
 	ts, err := e.getTimestampingInformation(intf)
@@ -516,6 +532,31 @@ func (e *Ethtool) PermAddr(intf string) (string, error) {
 	), nil
 }
 
+// GetWakeOnLan returns the WoL config for the given interface name.
+func (e *Ethtool) GetWakeOnLan(intf string) (WakeOnLan, error) {
+	wol := WakeOnLan{
+		Cmd: ETHTOOL_GWOL,
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&wol))); err != nil {
+		return WakeOnLan{}, err
+	}
+
+	return wol, nil
+}
+
+// SetWakeOnLan sets the WoL config for the given interface name and
+// returns the new WoL config.
+func (e *Ethtool) SetWakeOnLan(intf string, wol WakeOnLan) (WakeOnLan, error) {
+	wol.Cmd = ETHTOOL_SWOL
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&wol))); err != nil {
+		return WakeOnLan{}, err
+	}
+
+	return wol, nil
+}
+
 func (e *Ethtool) ioctl(intf string, data uintptr) error {
 	var name [IFNAMSIZ]byte
 	copy(name[:], []byte(intf))
@@ -579,6 +620,16 @@ func (e *Ethtool) getCoalesce(intf string) (Coalesce, error) {
 	return coalesce, nil
 }
 
+func (e *Ethtool) setCoalesce(intf string, coalesce Coalesce) (Coalesce, error) {
+	coalesce.Cmd = ETHTOOL_SCOALESCE
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&coalesce))); err != nil {
+		return Coalesce{}, err
+	}
+
+	return coalesce, nil
+}
+
 func (e *Ethtool) getTimestampingInformation(intf string) (TimestampingInformation, error) {
 	ts := TimestampingInformation{
 		Cmd: ETHTOOL_GET_TS_INFO,
@@ -682,6 +733,23 @@ func isFeatureBitSet(blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock, index u
 	return (blocks)[index/32].active&(1<<(index%32)) != 0
 }
 
+// FeatureState contains the state of a feature.
+type FeatureState struct {
+	Available    bool
+	Requested    bool
+	Active       bool
+	NeverChanged bool
+}
+
+func getFeatureStateBits(blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock, index uint) FeatureState {
+	return FeatureState{
+		Available:    (blocks)[index/32].available&(1<<(index%32)) != 0,
+		Requested:    (blocks)[index/32].requested&(1<<(index%32)) != 0,
+		Active:       (blocks)[index/32].active&(1<<(index%32)) != 0,
+		NeverChanged: (blocks)[index/32].never_changed&(1<<(index%32)) != 0,
+	}
+}
+
 func setFeatureBit(blocks *[MAX_FEATURE_BLOCKS]ethtoolSetFeaturesBlock, index uint, value bool) {
 	blockIndex, bitIndex := index/32, index%32
 
@@ -770,6 +838,36 @@ func (e *Ethtool) Features(intf string) (map[string]bool, error) {
 	return result, nil
 }
 
+// FeaturesWithState retrieves the features of the given interface name,
+// with extra state flags indicating whether each feature can be enabled.
+func (e *Ethtool) FeaturesWithState(intf string) (map[string]FeatureState, error) {
+	names, err := e.FeatureNames(intf)
+	if err != nil {
+		return nil, err
+	}
+
+	length := uint32(len(names))
+	if length == 0 {
+		return map[string]FeatureState{}, nil
+	}
+
+	features := ethtoolGfeatures{
+		cmd:  ETHTOOL_GFEATURES,
+		size: (length + 32 - 1) / 32,
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&features))); err != nil {
+		return nil, err
+	}
+
+	var result = make(map[string]FeatureState, length)
+	for key, index := range names {
+		result[key] = getFeatureStateBits(features.blocks, index)
+	}
+
+	return result, nil
+}
+
 // Change requests a change in the given device's features.
 func (e *Ethtool) Change(intf string, config map[string]bool) error {
 	names, err := e.FeatureNames(intf)
@@ -857,7 +955,7 @@ func (e *Ethtool) UpdatePrivFlags(intf string, config map[string]bool) error {
 	return e.ioctl(intf, uintptr(unsafe.Pointer(&update)))
 }
 
-// Get state of a link.
+// LinkState gets the state of a link.
 func (e *Ethtool) LinkState(intf string) (uint32, error) {
 	x := ethtoolLink{
 		cmd: ETHTOOL_GLINK,
@@ -928,7 +1026,7 @@ func (e *Ethtool) Close() {
 
 // NewEthtool returns a new ethtool handler
 func NewEthtool() (*Ethtool, error) {
-	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM|unix.SOCK_CLOEXEC, unix.IPPROTO_IP)
 	if err != nil {
 		return nil, err
 	}
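
A hedged usage sketch for the new wake-on-LAN getter and setter added above; the interface name is a placeholder and error handling is minimal:

```go
package main

import (
	"fmt"
	"log"

	"github.com/safchain/ethtool"
)

func main() {
	et, err := ethtool.NewEthtool()
	if err != nil {
		log.Fatal(err)
	}
	defer et.Close()

	// "eth0" is a placeholder interface name.
	wol, err := et.GetWakeOnLan("eth0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("supported=%#x enabled=%#x\n", wol.Supported, wol.Opts)

	// Enable magic-packet wake-up only.
	wol.Opts = ethtool.WAKE_MAGIC
	if _, err := et.SetWakeOnLan("eth0", wol); err != nil {
		log.Fatal(err)
	}
}
```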
diff --git a/vendor/github.com/safchain/ethtool/ethtool_cmd.go b/vendor/github.com/safchain/ethtool/ethtool_cmd.go
index e94d6dd892f8c0928fba0aec0cce122c6bc8e819..09499fea8c046dd9ae5c1da2343eb78d181291ee 100644
--- a/vendor/github.com/safchain/ethtool/ethtool_cmd.go
+++ b/vendor/github.com/safchain/ethtool/ethtool_cmd.go
@@ -33,7 +33,9 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-type EthtoolCmd struct { /* ethtool.c: struct ethtool_cmd */
+// EthtoolCmd is the Go version of the Linux kernel ethtool_cmd struct;
+// see ethtool.c.
+type EthtoolCmd struct {
 	Cmd            uint32
 	Supported      uint32
 	Advertising    uint32
@@ -102,10 +104,6 @@ func (f *EthtoolCmd) reflect(retv *map[string]uint64) {
 		default:
 			(*retv)[typeField.Name+"_unknown_type"] = 0
 		}
-
-		// tag := typeField.Tag
-		// fmt.Printf("Field Name: %s,\t Field Value: %v,\t Tag Value: %s\n",
-		//	typeField.Name, valueField.Interface(), tag.Get("tag_name"))
 	}
 }
 
@@ -198,6 +196,7 @@ func (e *Ethtool) CmdGetMapped(intf string) (map[string]uint64, error) {
 	return result, nil
 }
 
+// CmdGetMapped returns the interface settings as a map.
 func CmdGetMapped(intf string) (map[string]uint64, error) {
 	e, err := NewEthtool()
 	if err != nil {
diff --git a/vendor/github.com/safchain/ethtool/ethtool_darwin.go b/vendor/github.com/safchain/ethtool/ethtool_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..721a214c4a77f0e529fc09fe31bb9835d7200004
--- /dev/null
+++ b/vendor/github.com/safchain/ethtool/ethtool_darwin.go
@@ -0,0 +1,30 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+package ethtool
+
+var supportedCapabilities = []struct {
+	name  string
+	mask  uint64
+	speed uint64
+}{
+	// no supported capabilities on darwin
+}
diff --git a/vendor/github.com/safchain/ethtool/ethtool_linux.go b/vendor/github.com/safchain/ethtool/ethtool_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..70fb8d7186f38b9fc43fad82d31223c2bdaeb0c5
--- /dev/null
+++ b/vendor/github.com/safchain/ethtool/ethtool_linux.go
@@ -0,0 +1,56 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+package ethtool
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+var supportedCapabilities = []struct {
+	name  string
+	mask  uint64
+	speed uint64
+}{
+	{"10baseT_Half", unix.ETHTOOL_LINK_MODE_10baseT_Half_BIT, 10_000_000},
+	{"10baseT_Full", unix.ETHTOOL_LINK_MODE_10baseT_Full_BIT, 10_000_000},
+	{"100baseT_Half", unix.ETHTOOL_LINK_MODE_100baseT_Half_BIT, 100_000_000},
+	{"100baseT_Full", unix.ETHTOOL_LINK_MODE_100baseT_Full_BIT, 100_000_000},
+	{"1000baseT_Half", unix.ETHTOOL_LINK_MODE_1000baseT_Half_BIT, 1_000_000_000},
+	{"1000baseT_Full", unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1_000_000_000},
+	{"10000baseT_Full", unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 10_000_000_000},
+	{"2500baseT_Full", unix.ETHTOOL_LINK_MODE_2500baseT_Full_BIT, 2_500_000_000},
+	{"1000baseKX_Full", unix.ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 1_000_000_000},
+	{"10000baseKX_Full", unix.ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 10_000_000_000},
+	{"10000baseKR_Full", unix.ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 10_000_000_000},
+	{"10000baseR_FEC", unix.ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 10_000_000_000},
+	{"20000baseMLD2_Full", unix.ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, 20_000_000_000},
+	{"20000baseKR2_Full", unix.ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 20_000_000_000},
+	{"40000baseKR4_Full", unix.ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 40_000_000_000},
+	{"40000baseCR4_Full", unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 40_000_000_000},
+	{"40000baseSR4_Full", unix.ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 40_000_000_000},
+	{"40000baseLR4_Full", unix.ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 40_000_000_000},
+	{"56000baseKR4_Full", unix.ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 56_000_000_000},
+	{"56000baseCR4_Full", unix.ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 56_000_000_000},
+	{"56000baseSR4_Full", unix.ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 56_000_000_000},
+	{"56000baseLR4_Full", unix.ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 56_000_000_000},
+	{"25000baseCR_Full", unix.ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 25_000_000_000},
+}
diff --git a/vendor/github.com/urfave/cli/v2/flag_string_slice.go b/vendor/github.com/urfave/cli/v2/flag_string_slice.go
index 28f4798f5589d1cdfe9b118871ef00993a68ab18..66bdf1afcd76c9330a3faddbf50c8cd9c456a2c5 100644
--- a/vendor/github.com/urfave/cli/v2/flag_string_slice.go
+++ b/vendor/github.com/urfave/cli/v2/flag_string_slice.go
@@ -150,8 +150,8 @@ func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
 		setValue = f.Value.clone()
 	default:
 		setValue = new(StringSlice)
-		setValue.WithSeparatorSpec(f.separator)
 	}
+	setValue.WithSeparatorSpec(f.separator)
 
 	setValue.keepSpace = f.KeepSpace
 
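The fix matters when a flag carries a default Value: previously only the plain default branch applied WithSeparatorSpec, so a custom separator was silently ignored once the default value was cloned. A sketch, assuming the App.SliceFlagSeparator field available in recent v2 releases:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		// App-level separator; propagated to the flag's value via
		// WithSeparatorSpec, which the fix now applies in every branch.
		SliceFlagSeparator: ";",
		Flags: []cli.Flag{
			&cli.StringSliceFlag{
				Name:  "tags",
				Value: cli.NewStringSlice("default"), // triggers the clone branch
			},
		},
		Action: func(c *cli.Context) error {
			fmt.Println(c.StringSlice("tags"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

With the fix, `--tags "a;b"` parses to two elements even though the flag has a default value.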
diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt
index 4b620feeb0954c757b2892e6d9857e5d35443795..2f3d76e3198a2e93a39622f2ae1d5399a29d8369 100644
--- a/vendor/github.com/urfave/cli/v2/godoc-current.txt
+++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt
@@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
 
 VERSION:
    {{.Version}}{{end}}{{end}}{{if .Description}}
@@ -136,7 +136,10 @@ var SubcommandHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}}
+   {{template "usageTemplate" .}}{{if .Category}}
+
+CATEGORY:
+   {{.Category}}{{end}}{{if .Description}}
 
 DESCRIPTION:
    {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go
index 640e290452ddc46be50e70b989aea1bf83de1a7b..d27e8ce3859c947d3b5e1a13a32a87d11053010f 100644
--- a/vendor/github.com/urfave/cli/v2/help.go
+++ b/vendor/github.com/urfave/cli/v2/help.go
@@ -54,7 +54,7 @@ var helpCommand = &Command{
 			cCtx = cCtx.parentContext
 		}
 
-		// Case 4. $ app hello foo
+		// Case 4. $ app help foo
 		// foo is the command for which help needs to be shown
 		if argsPresent {
 			return ShowCommandHelp(cCtx, firstArg)
@@ -150,7 +150,7 @@ func printCommandSuggestions(commands []*Command, writer io.Writer) {
 		if command.Hidden {
 			continue
 		}
-		if strings.HasSuffix(os.Getenv("SHELL"), "zsh") {
+		if strings.HasSuffix(os.Getenv("0"), "zsh") {
 			for _, name := range command.Names() {
 				_, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage)
 			}
@@ -248,7 +248,6 @@ func ShowCommandHelpAndExit(c *Context, command string, code int) {
 
 // ShowCommandHelp prints help for the given command
 func ShowCommandHelp(ctx *Context, command string) error {
-
 	commands := ctx.App.Commands
 	if ctx.Command.Subcommands != nil {
 		commands = ctx.Command.Subcommands
@@ -337,7 +336,6 @@ func ShowCommandCompletions(ctx *Context, command string) {
 			DefaultCompleteWithFlags(c)(ctx)
 		}
 	}
-
 }
 
 // printHelpCustom is the default implementation of HelpPrinterCustom.
@@ -345,7 +343,6 @@ func ShowCommandCompletions(ctx *Context, command string) {
 // The customFuncs map will be combined with a default template.FuncMap to
 // allow using arbitrary functions in template rendering.
 func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) {
-
 	const maxLineLength = 10000
 
 	funcMap := template.FuncMap{
@@ -450,6 +447,15 @@ func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
 		return false, arguments
 	}
 
+	for _, arg := range arguments {
+		// If arguments include "--", shell completion is disabled
+		// because after "--" only positional arguments are accepted.
+		// https://unix.stackexchange.com/a/11382
+		if arg == "--" {
+			return false, arguments
+		}
+	}
+
 	return true, arguments[:pos]
 }
 
@@ -499,7 +505,6 @@ func wrap(input string, offset int, wrapAt int) string {
 				ss = append(ss, wrapped)
 			} else {
 				ss = append(ss, padding+wrapped)
-
 			}
 
 		}
diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go
index 5748f4c2014013557e1e108b0294d74c46e8d6df..ccb22f1d533dc4418d528b93d70de2c1e68764c2 100644
--- a/vendor/github.com/urfave/cli/v2/template.go
+++ b/vendor/github.com/urfave/cli/v2/template.go
@@ -1,7 +1,7 @@
 package cli
 
 var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}`
-var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}`
+var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}`
 var descriptionTemplate = `{{wrap .Description 3}}`
 var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
    {{range $index, $author := .Authors}}{{if $index}}
@@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
 
 VERSION:
    {{.Version}}{{end}}{{end}}{{if .Description}}
@@ -83,7 +83,10 @@ var SubcommandHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}}
+   {{template "usageTemplate" .}}{{if .Category}}
+
+CATEGORY:
+   {{.Category}}{{end}}{{if .Description}}
 
 DESCRIPTION:
    {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
index 218ab23796553634acff00204b4a0bcdf61f3a60..01c2306cb28d8f0bbed07d4e832cc97993822c5e 100644
--- a/vendor/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"strings"
@@ -17,6 +18,7 @@ import (
 //
 // If `addr` is an IPv4 address and the broadcast address is not given, it
 // will be automatically computed based on the IP mask if /30 or larger.
+// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled.
 func AddrAdd(link Link, addr *Addr) error {
 	return pkgHandle.AddrAdd(link, addr)
 }
@@ -27,6 +29,7 @@ func AddrAdd(link Link, addr *Addr) error {
 //
 // If `addr` is an IPv4 address and the broadcast address is not given, it
 // will be automatically computed based on the IP mask if /30 or larger.
+// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled.
 func (h *Handle) AddrAdd(link Link, addr *Addr) error {
 	req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK)
 	return h.addrHandle(link, addr, req)
@@ -38,6 +41,7 @@ func (h *Handle) AddrAdd(link Link, addr *Addr) error {
 //
 // If `addr` is an IPv4 address and the broadcast address is not given, it
 // will be automatically computed based on the IP mask if /30 or larger.
+// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled.
 func AddrReplace(link Link, addr *Addr) error {
 	return pkgHandle.AddrReplace(link, addr)
 }
@@ -48,6 +52,7 @@ func AddrReplace(link Link, addr *Addr) error {
 //
 // If `addr` is an IPv4 address and the broadcast address is not given, it
 // will be automatically computed based on the IP mask if /30 or larger.
+// If `net.IPv4zero` is given as the broadcast address, broadcast is disabled.
 func (h *Handle) AddrReplace(link Link, addr *Addr) error {
 	req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK)
 	return h.addrHandle(link, addr, req)
@@ -56,18 +61,13 @@ func (h *Handle) AddrReplace(link Link, addr *Addr) error {
 // AddrDel will delete an IP address from a link device.
 //
 // Equivalent to: `ip addr del $addr dev $link`
-//
-// If `addr` is an IPv4 address and the broadcast address is not given, it
-// will be automatically computed based on the IP mask if /30 or larger.
 func AddrDel(link Link, addr *Addr) error {
 	return pkgHandle.AddrDel(link, addr)
 }
 
 // AddrDel will delete an IP address from a link device.
-// Equivalent to: `ip addr del $addr dev $link`
 //
-// If `addr` is an IPv4 address and the broadcast address is not given, it
-// will be automatically computed based on the IP mask if /30 or larger.
+// Equivalent to: `ip addr del $addr dev $link`
 func (h *Handle) AddrDel(link Link, addr *Addr) error {
 	req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK)
 	return h.addrHandle(link, addr, req)
@@ -141,6 +141,10 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error
 			addr.Broadcast = calcBroadcast
 		}
 
+		if net.IPv4zero.Equal(addr.Broadcast) {
+			addr.Broadcast = nil
+		}
+
 		if addr.Broadcast != nil {
 			req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast))
 		}
@@ -169,6 +173,9 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error
 // AddrList gets a list of IP addresses in the system.
 // Equivalent to: `ip addr show`.
 // The list can be filtered by link and ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func AddrList(link Link, family int) ([]Addr, error) {
 	return pkgHandle.AddrList(link, family)
 }
@@ -176,14 +183,17 @@ func AddrList(link Link, family int) ([]Addr, error) {
 // AddrList gets a list of IP addresses in the system.
 // Equivalent to: `ip addr show`.
 // The list can be filtered by link and ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP)
 	msg := nl.NewIfAddrmsg(family)
 	req.AddData(msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	indexFilter := 0
@@ -212,7 +222,7 @@ func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
 		res = append(res, addr)
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 func parseAddr(m []byte) (addr Addr, family int, err error) {
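
The same pattern repeats across these netlink list functions: a dump interrupted by concurrent changes now returns partial results together with ErrDumpInterrupted instead of failing outright. A caller sketch under that contract:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// A nil link means "all interfaces". AddrList may now return partial
	// results along with ErrDumpInterrupted.
	addrs, err := netlink.AddrList(nil, netlink.FAMILY_V4)
	switch {
	case errors.Is(err, netlink.ErrDumpInterrupted):
		fmt.Println("dump interrupted; results may be inconsistent or incomplete")
	case err != nil:
		log.Fatal(err)
	}
	for _, a := range addrs {
		fmt.Println(a.IPNet)
	}
}
```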
diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go
index 6c340b0ce9ad253da408df2536d0478ea9689c26..fa5766b801182166894d706fcb46e592c2ca692e 100644
--- a/vendor/github.com/vishvananda/netlink/bridge_linux.go
+++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 
 	"github.com/vishvananda/netlink/nl"
@@ -9,21 +10,27 @@ import (
 
 // BridgeVlanList gets a map of device id to bridge vlan infos.
 // Equivalent to: `bridge vlan show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
 	return pkgHandle.BridgeVlanList()
 }
 
 // BridgeVlanList gets a map of device id to bridge vlan infos.
 // Equivalent to: `bridge vlan show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP)
 	msg := nl.NewIfInfomsg(unix.AF_BRIDGE)
 	req.AddData(msg)
 	req.AddData(nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN))))
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	ret := make(map[int32][]*nl.BridgeVlanInfo)
 	for _, m := range msgs {
@@ -51,7 +58,7 @@ func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
 			}
 		}
 	}
-	return ret, nil
+	return ret, executeErr
 }
 
 // BridgeVlanAdd adds a new vlan filter entry
diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go
index d9f441613cc7c21520a9f8f5477d0127c1e7dc05..5008e7101f5097b4b5a6db6357d2129888301997 100644
--- a/vendor/github.com/vishvananda/netlink/chain_linux.go
+++ b/vendor/github.com/vishvananda/netlink/chain_linux.go
@@ -1,6 +1,8 @@
 package netlink
 
 import (
+	"errors"
+
 	"github.com/vishvananda/netlink/nl"
 	"golang.org/x/sys/unix"
 )
@@ -56,6 +58,9 @@ func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error {
 // ChainList gets a list of chains in the system.
 // Equivalent to: `tc chain list`.
 // The list can be filtered by link.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func ChainList(link Link, parent uint32) ([]Chain, error) {
 	return pkgHandle.ChainList(link, parent)
 }
@@ -63,6 +68,9 @@ func ChainList(link Link, parent uint32) ([]Chain, error) {
 // ChainList gets a list of chains in the system.
 // Equivalent to: `tc chain list`.
 // The list can be filtered by link.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP)
 	index := int32(0)
@@ -78,9 +86,9 @@ func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) {
 	}
 	req.AddData(msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []Chain
@@ -108,5 +116,5 @@ func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) {
 		res = append(res, chain)
 	}
 
-	return res, nil
+	return res, executeErr
 }
diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go
index a82eb09de2433c62587be7753902d4eddf136c7f..08fb16c2bc228a24b61ede5693b706212bf71898 100644
--- a/vendor/github.com/vishvananda/netlink/class_linux.go
+++ b/vendor/github.com/vishvananda/netlink/class_linux.go
@@ -201,14 +201,20 @@ func classPayload(req *nl.NetlinkRequest, class Class) error {
 
 // ClassList gets a list of classes in the system.
 // Equivalent to: `tc class show`.
+//
 // Generally returns nothing if link and parent are not specified.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func ClassList(link Link, parent uint32) ([]Class, error) {
 	return pkgHandle.ClassList(link, parent)
 }
 
 // ClassList gets a list of classes in the system.
 // Equivalent to: `tc class show`.
+//
 // Generally returns nothing if link and parent are not specified.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP)
 	msg := &nl.TcMsg{
@@ -222,9 +228,9 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
 	}
 	req.AddData(msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []Class
@@ -295,7 +301,7 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
 		res = append(res, class)
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) {
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
index ba022453b3b1de2d277dd804afa052c79c932ec7..b3d354d75e7b29071b3f856c1540a1020d7d4bcd 100644
--- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
@@ -5,6 +5,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"io/fs"
 	"net"
 	"time"
 
@@ -44,6 +45,9 @@ type InetFamily uint8
 
 // ConntrackTableList returns the flow list of a table of a specific family
 // conntrack -L [table] [options]          List conntrack or expectation table
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
 	return pkgHandle.ConntrackTableList(table, family)
 }
@@ -70,7 +74,7 @@ func ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *Conntrac
 // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
 // conntrack -D [table] parameters         Delete conntrack or expectation
 //
-// Deprecated: use [ConntrackDeleteFilter] instead.
+// Deprecated: use [ConntrackDeleteFilters] instead.
 func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
 	return pkgHandle.ConntrackDeleteFilters(table, family, filter)
 }
@@ -83,10 +87,13 @@ func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters
 
 // ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
 // conntrack -L [table] [options]          List conntrack or expectation table
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
-	res, err := h.dumpConntrackTable(table, family)
-	if err != nil {
-		return nil, err
+	res, executeErr := h.dumpConntrackTable(table, family)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	// Deserialize all the flows
@@ -95,7 +102,7 @@ func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily)
 		result = append(result, parseRawData(dataRaw))
 	}
 
-	return result, nil
+	return result, executeErr
 }
 
 // ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
@@ -152,11 +159,18 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami
 // ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed
 // conntrack -D [table] parameters         Delete conntrack or expectation
 func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) {
+	var finalErr error
 	res, err := h.dumpConntrackTable(table, family)
 	if err != nil {
-		return 0, err
+		if !errors.Is(err, ErrDumpInterrupted) {
+			return 0, err
+		}
+		// Best effort: continue with the partial dump and try to delete the
+		// entries matching the filter.
+		finalErr = err
 	}
 
+	var totalFilterErrors int
 	var matched uint
 	for _, dataRaw := range res {
 		flow := parseRawData(dataRaw)
@@ -165,15 +179,20 @@ func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFam
 				req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK)
 				// skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already
 				req2.AddRawData(dataRaw[4:])
-				req2.Execute(unix.NETLINK_NETFILTER, 0)
-				matched++
-				// flow is already deleted, no need to match on other filters and continue to the next flow.
-				break
+				if _, err = req2.Execute(unix.NETLINK_NETFILTER, 0); err == nil || errors.Is(err, fs.ErrNotExist) {
+					matched++
+					// flow is already deleted, no need to match on other filters and continue to the next flow.
+					break
+				} else {
+					totalFilterErrors++
+				}
 			}
 		}
 	}
-
-	return matched, nil
+	if totalFilterErrors > 0 {
+		finalErr = errors.Join(finalErr, fmt.Errorf("failed to delete %d conntrack flows with %d filters", totalFilterErrors, len(filters)))
+	}
+	return matched, finalErr
 }
 
 func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily, operation, flags int) *nl.NetlinkRequest {
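ConntrackDeleteFilters now pushes past an interrupted dump and folds per-flow delete failures into a joined error, so the matched count and the error have to be read together. A hedged sketch of that contract (same imports as the sketch above; the filter is caller-supplied):

```go
// deleteBestEffort deletes flows matching filter from the IPv4 conntrack
// table and reports how far it got. Sketch only; filter is any
// implementation of netlink.CustomConntrackFilter.
func deleteBestEffort(filter netlink.CustomConntrackFilter) {
	matched, err := netlink.ConntrackDeleteFilters(netlink.ConntrackTable, netlink.FAMILY_V4, filter)
	switch {
	case err == nil:
		log.Printf("deleted %d flows", matched)
	case errors.Is(err, netlink.ErrDumpInterrupted):
		// Interrupted dump, possibly joined with per-flow failures: the
		// matched flows were still deleted, but others may remain.
		log.Printf("deleted %d flows from an interrupted dump: %v", matched, err)
	default:
		log.Printf("deleted %d flows, some deletions failed: %v", matched, err)
	}
}
```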
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
index 0bfdf422d1e3d0c463074f0dc7f1a3e369f37280..0049048dc34e07e6c6590a22789e70d2f023900f 100644
--- a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
@@ -33,7 +33,7 @@ func ConntrackTableFlush(table ConntrackTableType) error {
 // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
 // conntrack -D [table] parameters         Delete conntrack or expectation
 //
-// Deprecated: use [ConntrackDeleteFilter] instead.
+// Deprecated: use [ConntrackDeleteFilters] instead.
 func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
 	return 0, ErrNotImplemented
 }
diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go
index d98801dbbe5e6b34e3bd205ba737506e804a5090..45d8ee4b6b08f344a21012427b7b3fe62ca26b07 100644
--- a/vendor/github.com/vishvananda/netlink/devlink_linux.go
+++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"strings"
@@ -466,6 +467,8 @@ func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) {
 
 // DevLinkGetDeviceList provides a pointer to devlink devices and nil error,
 // otherwise returns an error code.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) {
 	f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME)
 	if err != nil {
@@ -478,9 +481,9 @@ func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) {
 	req := h.newNetlinkRequest(int(f.ID),
 		unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP)
 	req.AddData(msg)
-	msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	devices, err := parseDevLinkDeviceList(msgs)
 	if err != nil {
@@ -489,11 +492,14 @@ func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) {
 	for _, d := range devices {
 		h.getEswitchAttrs(f, d)
 	}
-	return devices, nil
+	return devices, executeErr
 }
 
 // DevLinkGetDeviceList provides a pointer to devlink devices and nil error,
 // otherwise returns an error code.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func DevLinkGetDeviceList() ([]*DevlinkDevice, error) {
 	return pkgHandle.DevLinkGetDeviceList()
 }
@@ -646,6 +652,8 @@ func parseDevLinkAllPortList(msgs [][]byte) ([]*DevlinkPort, error) {
 
 // DevLinkGetPortList provides a pointer to devlink ports and nil error,
 // otherwise returns an error code.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) {
 	f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME)
 	if err != nil {
@@ -658,19 +666,21 @@ func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) {
 	req := h.newNetlinkRequest(int(f.ID),
 		unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP)
 	req.AddData(msg)
-	msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	ports, err := parseDevLinkAllPortList(msgs)
 	if err != nil {
 		return nil, err
 	}
-	return ports, nil
+	return ports, executeErr
 }
 
 // DevLinkGetPortList provides a pointer to devlink ports and nil error,
 // otherwise returns an error code.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func DevLinkGetAllPortList() ([]*DevlinkPort, error) {
 	return pkgHandle.DevLinkGetAllPortList()
 }
@@ -738,15 +748,18 @@ func (h *Handle) DevlinkGetDeviceResources(bus string, device string) (*DevlinkR
 
 // DevlinkGetDeviceParams returns parameters for devlink device
 // Equivalent to: `devlink dev param show <bus>/<device>`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) {
 	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device)
 	if err != nil {
 		return nil, err
 	}
 	req.Flags |= unix.NLM_F_DUMP
-	respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
-	if err != nil {
-		return nil, err
+	respmsg, executeErr := req.Execute(unix.NETLINK_GENERIC, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	var params []*DevlinkParam
 	for _, m := range respmsg {
@@ -761,11 +774,14 @@ func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkPa
 		params = append(params, p)
 	}
 
-	return params, nil
+	return params, executeErr
 }
 
 // DevlinkGetDeviceParams returns parameters for devlink device
 // Equivalent to: `devlink dev param show <bus>/<device>`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) {
 	return pkgHandle.DevlinkGetDeviceParams(bus, device)
 }
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
index 84e1ca7a49ba8b740fa3162dfd040b9ca3fb0223..a722e0a27baa54bc952ec2914e2e91013a29f041 100644
--- a/vendor/github.com/vishvananda/netlink/filter.go
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -231,6 +231,35 @@ func NewCsumAction() *CsumAction {
 	}
 }
 
+type VlanAct int8
+
+type VlanAction struct {
+	ActionAttrs
+	Action VlanAct
+	VlanID uint16
+}
+
+const (
+	TCA_VLAN_ACT_POP  VlanAct = 1
+	TCA_VLAN_ACT_PUSH VlanAct = 2
+)
+
+func (action *VlanAction) Type() string {
+	return "vlan"
+}
+
+func (action *VlanAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewVlanAction() *VlanAction {
+	return &VlanAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_PIPE,
+		},
+	}
+}
+
 type MirredAct uint8
 
 func (a MirredAct) String() string {
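The new VlanAction mirrors `tc ... action vlan`: NewVlanAction defaults the generic action to TC_ACT_PIPE, and the encoder (in filter_linux.go below) rejects a push without a VLAN ID. A short sketch of constructing both variants; attaching them to a filter is shown after the filter_linux.go hunks:

```go
// Push VLAN 100 onto matching packets, then continue the action chain.
push := netlink.NewVlanAction() // ActionAttrs.Action defaults to TC_ACT_PIPE
push.Action = netlink.TCA_VLAN_ACT_PUSH
push.VlanID = 100 // mandatory for push; encoding fails without it

// Pop the outer VLAN tag; VlanID is ignored for pop.
pop := netlink.NewVlanAction()
pop.Action = netlink.TCA_VLAN_ACT_POP
```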
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
index 87cd18f8e41996ababb1303cb30e00478d1d9d58..404e50d524eabd05ff4197ee430aa3ca67cbafdd 100644
--- a/vendor/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -65,6 +65,9 @@ type Flower struct {
 	EncSrcIPMask  net.IPMask
 	EncDestPort   uint16
 	EncKeyId      uint32
+	SrcMac        net.HardwareAddr
+	DestMac       net.HardwareAddr
+	VlanId        uint16
 	SkipHw        bool
 	SkipSw        bool
 	IPProto       *nl.IPProto
@@ -135,6 +138,15 @@ func (filter *Flower) encode(parent *nl.RtAttr) error {
 	if filter.EncKeyId != 0 {
 		parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId))
 	}
+	if filter.SrcMac != nil {
+		parent.AddRtAttr(nl.TCA_FLOWER_KEY_ETH_SRC, filter.SrcMac)
+	}
+	if filter.DestMac != nil {
+		parent.AddRtAttr(nl.TCA_FLOWER_KEY_ETH_DST, filter.DestMac)
+	}
+	if filter.VlanId != 0 {
+		parent.AddRtAttr(nl.TCA_FLOWER_KEY_VLAN_ID, nl.Uint16Attr(filter.VlanId))
+	}
 	if filter.IPProto != nil {
 		ipproto := *filter.IPProto
 		parent.AddRtAttr(nl.TCA_FLOWER_KEY_IP_PROTO, ipproto.Serialize())
@@ -201,6 +213,13 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error {
 			filter.EncDestPort = ntohs(datum.Value)
 		case nl.TCA_FLOWER_KEY_ENC_KEY_ID:
 			filter.EncKeyId = ntohl(datum.Value)
+		case nl.TCA_FLOWER_KEY_ETH_SRC:
+			filter.SrcMac = datum.Value
+		case nl.TCA_FLOWER_KEY_ETH_DST:
+			filter.DestMac = datum.Value
+		case nl.TCA_FLOWER_KEY_VLAN_ID:
+			filter.VlanId = native.Uint16(datum.Value[0:2])
+			filter.EthType = unix.ETH_P_8021Q
 		case nl.TCA_FLOWER_KEY_IP_PROTO:
 			val := new(nl.IPProto)
 			*val = nl.IPProto(datum.Value[0])
@@ -405,14 +424,20 @@ func (h *Handle) filterModify(filter Filter, proto, flags int) error {
 
 // FilterList gets a list of filters in the system.
 // Equivalent to: `tc filter show`.
+//
 // Generally returns nothing if link and parent are not specified.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func FilterList(link Link, parent uint32) ([]Filter, error) {
 	return pkgHandle.FilterList(link, parent)
 }
 
 // FilterList gets a list of filters in the system.
 // Equivalent to: `tc filter show`.
+//
 // Generally returns nothing if link and parent are not specified.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP)
 	msg := &nl.TcMsg{
@@ -426,9 +451,9 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
 	}
 	req.AddData(msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []Filter
@@ -516,7 +541,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
 		}
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 func toTcGen(attrs *ActionAttrs, tcgen *nl.TcGen) {
@@ -616,6 +641,22 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
 			}
 			toTcGen(action.Attrs(), &mirred.TcGen)
 			aopts.AddRtAttr(nl.TCA_MIRRED_PARMS, mirred.Serialize())
+		case *VlanAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("vlan"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			vlan := nl.TcVlan{
+				Action: int32(action.Action),
+			}
+			toTcGen(action.Attrs(), &vlan.TcGen)
+			aopts.AddRtAttr(nl.TCA_VLAN_PARMS, vlan.Serialize())
+			if action.Action == TCA_VLAN_ACT_PUSH && action.VlanID == 0 {
+				return fmt.Errorf("vlan id is required for push action")
+			}
+			if action.VlanID != 0 {
+				aopts.AddRtAttr(nl.TCA_VLAN_PUSH_VLAN_ID, nl.Uint16Attr(action.VlanID))
+			}
 		case *TunnelKeyAction:
 			table := attr.AddRtAttr(tabIndex, nil)
 			tabIndex++
@@ -786,6 +827,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 					action = &CsumAction{}
 				case "gact":
 					action = &GenericAction{}
+				case "vlan":
+					action = &VlanAction{}
 				case "tunnel_key":
 					action = &TunnelKeyAction{}
 				case "skbedit":
@@ -816,7 +859,17 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 							tcTs := nl.DeserializeTcf(adatum.Value)
 							actionTimestamp = toTimeStamp(tcTs)
 						}
-
+					case "vlan":
+						switch adatum.Attr.Type {
+						case nl.TCA_VLAN_PARMS:
+							vlan := *nl.DeserializeTcVlan(adatum.Value)
+							action.(*VlanAction).ActionAttrs = ActionAttrs{}
+							toAttrs(&vlan.TcGen, action.Attrs())
+							action.(*VlanAction).Action = VlanAct(vlan.Action)
+						case nl.TCA_VLAN_PUSH_VLAN_ID:
+							vlanId := native.Uint16(adatum.Value[0:2])
+							action.(*VlanAction).VlanID = vlanId
+						}
 					case "tunnel_key":
 						switch adatum.Attr.Type {
 						case nl.TCA_TUNNEL_KEY_PARMS:
@@ -920,9 +973,11 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 				actionnStatistic = (*ActionStatistic)(s)
 			}
 		}
-		action.Attrs().Statistics = actionnStatistic
-		action.Attrs().Timestamp = actionTimestamp
-		actions = append(actions, action)
+		if action != nil {
+			action.Attrs().Statistics = actionnStatistic
+			action.Attrs().Timestamp = actionTimestamp
+			actions = append(actions, action)
+		}
 	}
 	return actions, nil
 }
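Tying the new Flower keys to the vlan action: the decoder above maps TCA_FLOWER_KEY_VLAN_ID back into VlanId and forces EthType to 802.1Q, and the encoder only emits the MAC and VLAN attributes when they are set. An end-to-end sketch, assuming eth0 already carries a clsact/ingress qdisc:

```go
package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}
	srcMac, err := net.ParseMAC("02:00:00:00:00:01")
	if err != nil {
		log.Fatal(err)
	}

	pop := netlink.NewVlanAction()
	pop.Action = netlink.TCA_VLAN_ACT_POP

	filter := &netlink.Flower{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.HANDLE_MIN_INGRESS,
			Priority:  1,
			Protocol:  unix.ETH_P_8021Q, // match VLAN-tagged frames
		},
		SrcMac:  srcMac, // new key: match on source MAC
		VlanId:  100,    // new key: match VLAN 100
		Actions: []netlink.Action{pop},
	}
	if err := netlink.FilterAdd(filter); err != nil {
		log.Fatal(err)
	}
}
```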
diff --git a/vendor/github.com/vishvananda/netlink/fou.go b/vendor/github.com/vishvananda/netlink/fou.go
index 71e73c37a0a33753b5df5d0d4e82d40ead8d4864..ea9f6cf6737100567510e58ba6a4f00d3739e883 100644
--- a/vendor/github.com/vishvananda/netlink/fou.go
+++ b/vendor/github.com/vishvananda/netlink/fou.go
@@ -1,16 +1,7 @@
 package netlink
 
 import (
-	"errors"
-)
-
-var (
-	// ErrAttrHeaderTruncated is returned when a netlink attribute's header is
-	// truncated.
-	ErrAttrHeaderTruncated = errors.New("attribute header truncated")
-	// ErrAttrBodyTruncated is returned when a netlink attribute's body is
-	// truncated.
-	ErrAttrBodyTruncated = errors.New("attribute body truncated")
+	"net"
 )
 
 type Fou struct {
@@ -18,4 +9,8 @@ type Fou struct {
 	Port      int
 	Protocol  int
 	EncapType int
+	Local     net.IP
+	Peer      net.IP
+	PeerPort  int
+	IfIndex   int
 }
diff --git a/vendor/github.com/vishvananda/netlink/fou_linux.go b/vendor/github.com/vishvananda/netlink/fou_linux.go
index ed55b2b790d4790d0b91f4ceb10341638095359d..7645a5a5c20cf871f2389b6110c4f63bd2e8d203 100644
--- a/vendor/github.com/vishvananda/netlink/fou_linux.go
+++ b/vendor/github.com/vishvananda/netlink/fou_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package netlink
@@ -5,6 +6,8 @@ package netlink
 import (
 	"encoding/binary"
 	"errors"
+	"log"
+	"net"
 
 	"github.com/vishvananda/netlink/nl"
 	"golang.org/x/sys/unix"
@@ -29,6 +32,12 @@ const (
 	FOU_ATTR_IPPROTO
 	FOU_ATTR_TYPE
 	FOU_ATTR_REMCSUM_NOPARTIAL
+	FOU_ATTR_LOCAL_V4
+	FOU_ATTR_LOCAL_V6
+	FOU_ATTR_PEER_V4
+	FOU_ATTR_PEER_V6
+	FOU_ATTR_PEER_PORT
+	FOU_ATTR_IFINDEX
 	FOU_ATTR_MAX = FOU_ATTR_REMCSUM_NOPARTIAL
 )
 
@@ -128,10 +137,14 @@ func (h *Handle) FouDel(f Fou) error {
 	return nil
 }
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func FouList(fam int) ([]Fou, error) {
 	return pkgHandle.FouList(fam)
 }
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) FouList(fam int) ([]Fou, error) {
 	fam_id, err := FouFamilyId()
 	if err != nil {
@@ -150,9 +163,9 @@ func (h *Handle) FouList(fam int) ([]Fou, error) {
 
 	req.AddRawData(raw)
 
-	msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	fous := make([]Fou, 0, len(msgs))
@@ -165,45 +178,32 @@ func (h *Handle) FouList(fam int) ([]Fou, error) {
 		fous = append(fous, f)
 	}
 
-	return fous, nil
+	return fous, executeErr
 }
 
 func deserializeFouMsg(msg []byte) (Fou, error) {
-	// we'll skip to byte 4 to first attribute
-	msg = msg[3:]
-	var shift int
 	fou := Fou{}
 
-	for {
-		// attribute header is at least 16 bits
-		if len(msg) < 4 {
-			return fou, ErrAttrHeaderTruncated
-		}
-
-		lgt := int(binary.BigEndian.Uint16(msg[0:2]))
-		if len(msg) < lgt+4 {
-			return fou, ErrAttrBodyTruncated
-		}
-		attr := binary.BigEndian.Uint16(msg[2:4])
-
-		shift = lgt + 3
-		switch attr {
+	for attr := range nl.ParseAttributes(msg[4:]) {
+		switch attr.Type {
 		case FOU_ATTR_AF:
-			fou.Family = int(msg[5])
+			fou.Family = int(attr.Value[0])
 		case FOU_ATTR_PORT:
-			fou.Port = int(binary.BigEndian.Uint16(msg[5:7]))
-			// port is 2 bytes
-			shift = lgt + 2
+			fou.Port = int(networkOrder.Uint16(attr.Value))
 		case FOU_ATTR_IPPROTO:
-			fou.Protocol = int(msg[5])
+			fou.Protocol = int(attr.Value[0])
 		case FOU_ATTR_TYPE:
-			fou.EncapType = int(msg[5])
-		}
-
-		msg = msg[shift:]
-
-		if len(msg) < 4 {
-			break
+			fou.EncapType = int(attr.Value[0])
+		case FOU_ATTR_LOCAL_V4, FOU_ATTR_LOCAL_V6:
+			fou.Local = net.IP(attr.Value)
+		case FOU_ATTR_PEER_V4, FOU_ATTR_PEER_V6:
+			fou.Peer = net.IP(attr.Value)
+		case FOU_ATTR_PEER_PORT:
+			fou.PeerPort = int(networkOrder.Uint16(attr.Value))
+		case FOU_ATTR_IFINDEX:
+			fou.IfIndex = int(native.Uint16(attr.Value))
+		default:
+			log.Printf("unknown fou attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK)
 		}
 	}
 
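With the extra FOU_ATTR_* values decoded, FouList now surfaces tunnel endpoints as well as the listener port. A small sketch (assuming a kernel with the fou module loaded) that tolerates an interrupted dump and prints the fields added above:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	fous, err := netlink.FouList(netlink.FAMILY_V4)
	if err != nil && !errors.Is(err, netlink.ErrDumpInterrupted) {
		log.Fatal(err)
	}
	for _, f := range fous {
		// Local, Peer, PeerPort and IfIndex are the newly decoded fields.
		fmt.Printf("port=%d proto=%d local=%s peer=%s:%d ifindex=%d\n",
			f.Port, f.Protocol, f.Local, f.Peer, f.PeerPort, f.IfIndex)
	}
}
```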
diff --git a/vendor/github.com/vishvananda/netlink/fou_unspecified.go b/vendor/github.com/vishvananda/netlink/fou_unspecified.go
index 3a8365bfe6232d22fbbb05b141439c14615193f6..7e550151adc5272ff2a7ccc5bcf502e65431eb43 100644
--- a/vendor/github.com/vishvananda/netlink/fou_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/fou_unspecified.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package netlink
diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go
index 772e5834a26a731b13939d27919861612e62f200..7bdaad97b4732b8d00446e28eeca4c2b1831ce76 100644
--- a/vendor/github.com/vishvananda/netlink/genetlink_linux.go
+++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"syscall"
 
@@ -126,6 +127,8 @@ func parseFamilies(msgs [][]byte) ([]*GenlFamily, error) {
 	return families, nil
 }
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) {
 	msg := &nl.Genlmsg{
 		Command: nl.GENL_CTRL_CMD_GETFAMILY,
@@ -133,13 +136,19 @@ func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) {
 	}
 	req := h.newNetlinkRequest(nl.GENL_ID_CTRL, unix.NLM_F_DUMP)
 	req.AddData(msg)
-	msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
+	}
+	families, err := parseFamilies(msgs)
 	if err != nil {
 		return nil, err
 	}
-	return parseFamilies(msgs)
+	return families, executeErr
 }
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func GenlFamilyList() ([]*GenlFamily, error) {
 	return pkgHandle.GenlFamilyList()
 }
diff --git a/vendor/github.com/vishvananda/netlink/gtp_linux.go b/vendor/github.com/vishvananda/netlink/gtp_linux.go
index f5e160ba5c0656f3cd974c9d43b0e418d5445ae8..377dcae5c0355f3c6187f52ea639fbe92248730e 100644
--- a/vendor/github.com/vishvananda/netlink/gtp_linux.go
+++ b/vendor/github.com/vishvananda/netlink/gtp_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"strings"
@@ -74,6 +75,8 @@ func parsePDP(msgs [][]byte) ([]*PDP, error) {
 	return pdps, nil
 }
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) GTPPDPList() ([]*PDP, error) {
 	f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME)
 	if err != nil {
@@ -85,13 +88,19 @@ func (h *Handle) GTPPDPList() ([]*PDP, error) {
 	}
 	req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_DUMP)
 	req.AddData(msg)
-	msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
+	}
+	pdps, err := parsePDP(msgs)
 	if err != nil {
 		return nil, err
 	}
-	return parsePDP(msgs)
+	return pdps, executeErr
 }
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func GTPPDPList() ([]*PDP, error) {
 	return pkgHandle.GTPPDPList()
 }
diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go
index f820cdb678d5477c7c7142317f4f05a63b19addd..cccf5d792a785af4dee5faac59876540b9f84f36 100644
--- a/vendor/github.com/vishvananda/netlink/link.go
+++ b/vendor/github.com/vishvananda/netlink/link.go
@@ -56,6 +56,8 @@ type LinkAttrs struct {
 	Vfs            []VfInfo // virtual functions available on link
 	Group          uint32
 	PermHWAddr     net.HardwareAddr
+	ParentDev      string
+	ParentDevBus   string
 	Slave          LinkSlave
 }
 
@@ -377,6 +379,13 @@ const (
 	NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2
 )
 
+type NetkitScrub int
+
+const (
+	NETKIT_SCRUB_NONE    NetkitScrub = 0
+	NETKIT_SCRUB_DEFAULT NetkitScrub = 1
+)
+
 func (n *Netkit) IsPrimary() bool {
 	return n.isPrimary
 }
@@ -391,6 +400,9 @@ type Netkit struct {
 	Mode          NetkitMode
 	Policy        NetkitPolicy
 	PeerPolicy    NetkitPolicy
+	Scrub         NetkitScrub
+	PeerScrub     NetkitScrub
+	supportsScrub bool
 	isPrimary     bool
 	peerLinkAttrs LinkAttrs
 }
@@ -403,6 +415,10 @@ func (n *Netkit) Type() string {
 	return "netkit"
 }
 
+func (n *Netkit) SupportsScrub() bool {
+	return n.supportsScrub
+}
+
 // Veth devices must specify PeerName on create
 type Veth struct {
 	LinkAttrs
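Scrub and PeerScrub are always encoded on create, but supportsScrub is only flipped on when the kernel echoes IFLA_NETKIT_SCRUB back, which makes SupportsScrub() the feature probe. A hedged sketch; the device name and the mode/policy constants used here are illustrative:

```go
// newNetkitWithScrub creates a netkit device with default scrubbing on the
// primary and none on the peer, then asks the kernel whether the scrub
// attributes are actually supported.
func newNetkitWithScrub() error {
	nk := &netlink.Netkit{
		LinkAttrs:  netlink.LinkAttrs{Name: "nk0"},
		Mode:       netlink.NETKIT_MODE_L3,
		Policy:     netlink.NETKIT_POLICY_FORWARD,
		PeerPolicy: netlink.NETKIT_POLICY_FORWARD,
		Scrub:      netlink.NETKIT_SCRUB_DEFAULT,
		PeerScrub:  netlink.NETKIT_SCRUB_NONE,
	}
	if err := netlink.LinkAdd(nk); err != nil {
		return err
	}
	link, err := netlink.LinkByName("nk0")
	if err != nil {
		return err
	}
	if n, ok := link.(*netlink.Netkit); ok && n.SupportsScrub() {
		log.Printf("kernel scrub config: scrub=%d peerScrub=%d", n.Scrub, n.PeerScrub)
	}
	return nil
}
```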
@@ -761,19 +777,19 @@ const (
 )
 
 var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{
-	BOND_XMIT_HASH_POLICY_LAYER2:   "layer2",
-	BOND_XMIT_HASH_POLICY_LAYER3_4: "layer3+4",
-	BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3",
-	BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3",
-	BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4",
+	BOND_XMIT_HASH_POLICY_LAYER2:      "layer2",
+	BOND_XMIT_HASH_POLICY_LAYER3_4:    "layer3+4",
+	BOND_XMIT_HASH_POLICY_LAYER2_3:    "layer2+3",
+	BOND_XMIT_HASH_POLICY_ENCAP2_3:    "encap2+3",
+	BOND_XMIT_HASH_POLICY_ENCAP3_4:    "encap3+4",
 	BOND_XMIT_HASH_POLICY_VLAN_SRCMAC: "vlan+srcmac",
 }
 var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{
-	"layer2":   BOND_XMIT_HASH_POLICY_LAYER2,
-	"layer3+4": BOND_XMIT_HASH_POLICY_LAYER3_4,
-	"layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3,
-	"encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3,
-	"encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4,
+	"layer2":      BOND_XMIT_HASH_POLICY_LAYER2,
+	"layer3+4":    BOND_XMIT_HASH_POLICY_LAYER3_4,
+	"layer2+3":    BOND_XMIT_HASH_POLICY_LAYER2_3,
+	"encap2+3":    BOND_XMIT_HASH_POLICY_ENCAP2_3,
+	"encap3+4":    BOND_XMIT_HASH_POLICY_ENCAP3_4,
 	"vlan+srcmac": BOND_XMIT_HASH_POLICY_VLAN_SRCMAC,
 }
 
diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go
index d713612a90797198ac9f9cbf25824befd9974cef..d6bffded3102e99e1bc8854f57af20fbe5f1d86f 100644
--- a/vendor/github.com/vishvananda/netlink/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/link_linux.go
@@ -3,6 +3,7 @@ package netlink
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -1807,20 +1808,20 @@ func (h *Handle) LinkDel(link Link) error {
 }
 
 func (h *Handle) linkByNameDump(name string) (Link, error) {
-	links, err := h.LinkList()
-	if err != nil {
-		return nil, err
+	links, executeErr := h.LinkList()
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	for _, link := range links {
 		if link.Attrs().Name == name {
-			return link, nil
+			return link, executeErr
 		}
 
 		// support finding interfaces also via altnames
 		for _, altName := range link.Attrs().AltNames {
 			if altName == name {
-				return link, nil
+				return link, executeErr
 			}
 		}
 	}
@@ -1828,25 +1829,33 @@ func (h *Handle) linkByNameDump(name string) (Link, error) {
 }
 
 func (h *Handle) linkByAliasDump(alias string) (Link, error) {
-	links, err := h.LinkList()
-	if err != nil {
-		return nil, err
+	links, executeErr := h.LinkList()
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	for _, link := range links {
 		if link.Attrs().Alias == alias {
-			return link, nil
+			return link, executeErr
 		}
 	}
 	return nil, LinkNotFoundError{fmt.Errorf("Link alias %s not found", alias)}
 }
 
 // LinkByName finds a link by name and returns a pointer to the object.
+//
+// If the kernel doesn't support IFLA_IFNAME, this method will fall back to
+// filtering a dump of all links. In this case, if the returned error is
+// [ErrDumpInterrupted] the result may be missing or outdated.
 func LinkByName(name string) (Link, error) {
 	return pkgHandle.LinkByName(name)
 }
 
 // LinkByName finds a link by name and returns a pointer to the object.
+//
+// If the kernel doesn't support IFLA_IFNAME, this method will fall back to
+// filtering a dump of all links. In this case, if the returned error is
+// [ErrDumpInterrupted] the result may be missing or outdated.
 func (h *Handle) LinkByName(name string) (Link, error) {
 	if h.lookupByDump {
 		return h.linkByNameDump(name)
@@ -1879,12 +1888,20 @@ func (h *Handle) LinkByName(name string) (Link, error) {
 
 // LinkByAlias finds a link by its alias and returns a pointer to the object.
 // If there are multiple links with the alias it returns the first one
+//
+// If the kernel doesn't support IFLA_IFALIAS, this method will fall back to
+// filtering a dump of all links. In this case, if the returned error is
+// [ErrDumpInterrupted] the result may be missing or outdated.
 func LinkByAlias(alias string) (Link, error) {
 	return pkgHandle.LinkByAlias(alias)
 }
 
 // LinkByAlias finds a link by its alias and returns a pointer to the object.
 // If there are multiple links with the alias it returns the first one
+//
+// If the kernel doesn't support IFLA_IFALIAS, this method will fall back to
+// filtering a dump of all links. In this case, if the returned error is
+// [ErrDumpInterrupted] the result may be missing or outdated.
 func (h *Handle) LinkByAlias(alias string) (Link, error) {
 	if h.lookupByDump {
 		return h.linkByAliasDump(alias)
@@ -2246,6 +2263,10 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
 					break
 				}
 			}
+		case unix.IFLA_PARENT_DEV_NAME:
+			base.ParentDev = string(attr.Value[:len(attr.Value)-1])
+		case unix.IFLA_PARENT_DEV_BUS_NAME:
+			base.ParentDevBus = string(attr.Value[:len(attr.Value)-1])
 		}
 	}
 
@@ -2321,6 +2342,9 @@ func LinkList() ([]Link, error) {
 
 // LinkList gets a list of link devices.
 // Equivalent to: `ip link show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) LinkList() ([]Link, error) {
 	// NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need
 	//             to get the message ourselves to parse link type.
@@ -2331,9 +2355,9 @@ func (h *Handle) LinkList() ([]Link, error) {
 	attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF))
 	req.AddData(attr)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []Link
@@ -2345,7 +2369,7 @@ func (h *Handle) LinkList() ([]Link, error) {
 		res = append(res, link)
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 // LinkUpdate is used to pass information back from LinkSubscribe()
@@ -2381,6 +2405,10 @@ type LinkSubscribeOptions struct {
 // LinkSubscribeWithOptions work like LinkSubscribe but enable to
 // provide additional options to modify the behavior. Currently, the
 // namespace can be provided as well as an error callback.
+//
+// When options.ListExisting is true, options.ErrorCallback may be
+// called with [ErrDumpInterrupted] to indicate that results from
+// the initial dump of links may be inconsistent or incomplete.
 func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error {
 	if options.Namespace == nil {
 		none := netns.None()
@@ -2440,6 +2468,9 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c
 				continue
 			}
 			for _, m := range msgs {
+				if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil {
+					cberr(ErrDumpInterrupted)
+				}
 				if m.Header.Type == unix.NLMSG_DONE {
 					continue
 				}
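An interrupted initial dump is reported through the error callback rather than the update channel, so subscribers that set ListExisting should watch for the sentinel there. A sketch (channel consumption elided):

```go
ch := make(chan netlink.LinkUpdate)
done := make(chan struct{})
err := netlink.LinkSubscribeWithOptions(ch, done, netlink.LinkSubscribeOptions{
	ListExisting: true,
	ErrorCallback: func(err error) {
		if errors.Is(err, netlink.ErrDumpInterrupted) {
			// The initial listing raced with link changes; consider
			// re-listing once the subscription is established.
			log.Println("initial link dump interrupted:", err)
		}
	},
})
if err != nil {
	log.Fatal(err)
}
```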
@@ -2649,6 +2680,8 @@ func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error {
 	data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode)))
 	data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy)))
 	data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy)))
+	data.AddRtAttr(nl.IFLA_NETKIT_SCRUB, nl.Uint32Attr(uint32(nk.Scrub)))
+	data.AddRtAttr(nl.IFLA_NETKIT_PEER_SCRUB, nl.Uint32Attr(uint32(nk.PeerScrub)))
 
 	if (flag & unix.NLM_F_EXCL) == 0 {
 		// Modifying peer link attributes will not take effect
@@ -2709,6 +2742,12 @@ func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) {
 			netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4]))
 		case nl.IFLA_NETKIT_PEER_POLICY:
 			netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_NETKIT_SCRUB:
+			netkit.supportsScrub = true
+			netkit.Scrub = NetkitScrub(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_NETKIT_PEER_SCRUB:
+			netkit.supportsScrub = true
+			netkit.PeerScrub = NetkitScrub(native.Uint32(datum.Value[0:4]))
 		}
 	}
 }
@@ -3006,7 +3045,6 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) {
 	}
 }
 
-// copied from pkg/net_linux.go
 func linkFlags(rawFlags uint32) net.Flags {
 	var f net.Flags
 	if rawFlags&unix.IFF_UP != 0 {
@@ -3024,6 +3062,9 @@ func linkFlags(rawFlags uint32) net.Flags {
 	if rawFlags&unix.IFF_MULTICAST != 0 {
 		f |= net.FlagMulticast
 	}
+	if rawFlags&unix.IFF_RUNNING != 0 {
+		f |= net.FlagRunning
+	}
 	return f
 }
 
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
index 2d93044a6eaeada548513343e5bea73ba340ebb6..1c6f2958aea73cadf0067807436bd133c3755f7a 100644
--- a/vendor/github.com/vishvananda/netlink/neigh_linux.go
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"syscall"
@@ -206,6 +207,9 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
 // NeighList returns a list of IP-MAC mappings in the system (ARP table).
 // Equivalent to: `ip neighbor show`.
 // The list can be filtered by link and ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func NeighList(linkIndex, family int) ([]Neigh, error) {
 	return pkgHandle.NeighList(linkIndex, family)
 }
@@ -213,6 +217,9 @@ func NeighList(linkIndex, family int) ([]Neigh, error) {
 // NeighProxyList returns a list of neighbor proxies in the system.
 // Equivalent to: `ip neighbor show proxy`.
 // The list can be filtered by link and ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func NeighProxyList(linkIndex, family int) ([]Neigh, error) {
 	return pkgHandle.NeighProxyList(linkIndex, family)
 }
@@ -220,6 +227,9 @@ func NeighProxyList(linkIndex, family int) ([]Neigh, error) {
 // NeighList returns a list of IP-MAC mappings in the system (ARP table).
 // Equivalent to: `ip neighbor show`.
 // The list can be filtered by link and ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) {
 	return h.NeighListExecute(Ndmsg{
 		Family: uint8(family),
@@ -230,6 +240,9 @@ func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) {
 // NeighProxyList returns a list of neighbor proxies in the system.
 // Equivalent to: `ip neighbor show proxy`.
 // The list can be filtered by link, ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) {
 	return h.NeighListExecute(Ndmsg{
 		Family: uint8(family),
@@ -239,18 +252,24 @@ func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) {
 }
 
 // NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func NeighListExecute(msg Ndmsg) ([]Neigh, error) {
 	return pkgHandle.NeighListExecute(msg)
 }
 
 // NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP)
 	req.AddData(&msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []Neigh
@@ -281,7 +300,7 @@ func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) {
 		res = append(res, *neigh)
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 func NeighDeserialize(m []byte) (*Neigh, error) {
@@ -364,6 +383,10 @@ type NeighSubscribeOptions struct {
 // NeighSubscribeWithOptions work like NeighSubscribe but enable to
 // provide additional options to modify the behavior. Currently, the
 // namespace can be provided as well as an error callback.
+//
+// When options.ListExisting is true, options.ErrorCallback may be
+// called with [ErrDumpInterrupted] to indicate that results from
+// the initial dump of links may be inconsistent or incomplete.
 func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, options NeighSubscribeOptions) error {
 	if options.Namespace == nil {
 		none := netns.None()
@@ -428,6 +451,9 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <
 				continue
 			}
 			for _, m := range msgs {
+				if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil {
+					cberr(ErrDumpInterrupted)
+				}
 				if m.Header.Type == unix.NLMSG_DONE {
 					if listExisting {
 						// This will be called after handling AF_UNSPEC
diff --git a/vendor/github.com/vishvananda/netlink/netlink_linux.go b/vendor/github.com/vishvananda/netlink/netlink_linux.go
index a20d293d870fc8836c3d88dc7f6524e6c1068670..7416e305104001814924a1270d930531292c52c7 100644
--- a/vendor/github.com/vishvananda/netlink/netlink_linux.go
+++ b/vendor/github.com/vishvananda/netlink/netlink_linux.go
@@ -9,3 +9,6 @@ const (
 	FAMILY_V6   = nl.FAMILY_V6
 	FAMILY_MPLS = nl.FAMILY_MPLS
 )
+
+// ErrDumpInterrupted is an alias for [nl.ErrDumpInterrupted].
+var ErrDumpInterrupted = nl.ErrDumpInterrupted
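Because the underlying errDumpInterrupted type (defined in the nl hunk below) implements Is against unix.EINTR, code written for the old EINTR behaviour keeps working. Both checks hold for the same sentinel value:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	err := error(netlink.ErrDumpInterrupted)
	fmt.Println(errors.Is(err, netlink.ErrDumpInterrupted)) // true: exact match
	fmt.Println(errors.Is(err, unix.EINTR))                 // true: legacy compatibility
}
```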
diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
index 0b5be470cb061c535114ab5c16d693fe795cf8f4..6dfa16cc28dd16779c6bfedbaebc02366ded8a9f 100644
--- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
@@ -38,6 +38,8 @@ const (
 	IFLA_NETKIT_POLICY
 	IFLA_NETKIT_PEER_POLICY
 	IFLA_NETKIT_MODE
+	IFLA_NETKIT_SCRUB
+	IFLA_NETKIT_PEER_SCRUB
 	IFLA_NETKIT_MAX = IFLA_NETKIT_MODE
 )
 
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
index 6cecc4517a5dc30337f0e02eaece157c3b46e7b8..4d2732a9e883b8987b1f6e94fcc32cc155e9e407 100644
--- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -4,6 +4,7 @@ package nl
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 	"os"
@@ -11,6 +12,7 @@ import (
 	"sync"
 	"sync/atomic"
 	"syscall"
+	"time"
 	"unsafe"
 
 	"github.com/vishvananda/netns"
@@ -43,6 +45,26 @@ var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0}
 // ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets
 var EnableErrorMessageReporting bool = false
 
+// ErrDumpInterrupted is an instance of errDumpInterrupted, used to report that
+// the kernel set the NLM_F_DUMP_INTR flag in a netlink response, indicating
+// that the results may be incomplete or inconsistent.
+var ErrDumpInterrupted = errDumpInterrupted{}
+
+// errDumpInterrupted is an error type, used to report that NLM_F_DUMP_INTR was
+// set in a netlink response.
+type errDumpInterrupted struct{}
+
+func (errDumpInterrupted) Error() string {
+	return "results may be incomplete or inconsistent"
+}
+
+// Before errDumpInterrupted was introduced, EINTR was returned when a netlink
+// response had NLM_F_DUMP_INTR. Retain backward compatibility with code that
+// may be checking for EINTR using Is.
+func (e errDumpInterrupted) Is(target error) bool {
+	return target == unix.EINTR
+}
+
 // GetIPFamily returns the family type of a net.IP.
 func GetIPFamily(ip net.IP) int {
 	if len(ip) <= net.IPv4len {
@@ -492,22 +514,26 @@ func (req *NetlinkRequest) AddRawData(data []byte) {
 // Execute the request against the given sockType.
 // Returns a list of netlink messages in serialized format, optionally filtered
 // by resType.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
 	var res [][]byte
 	err := req.ExecuteIter(sockType, resType, func(msg []byte) bool {
 		res = append(res, msg)
 		return true
 	})
-	if err != nil {
+	if err != nil && !errors.Is(err, ErrDumpInterrupted) {
 		return nil, err
 	}
-	return res, nil
+	return res, err
 }
 
 // ExecuteIter executes the request against the given sockType.
 // Calls the provided callback func once for each netlink message.
 // If the callback returns false, it is not called again, but
 // the remaining messages are consumed/discarded.
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 //
 // Thread safety: ExecuteIter holds a lock on the socket until
 // it finishes iteration so the callback must not call back into
@@ -559,6 +585,8 @@ func (req *NetlinkRequest) ExecuteIter(sockType int, resType uint16, f func(msg
 		return err
 	}
 
+	dumpIntr := false
+
 done:
 	for {
 		msgs, from, err := s.Receive()
@@ -580,7 +608,7 @@ done:
 			}
 
 			if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 {
-				return syscall.Errno(unix.EINTR)
+				dumpIntr = true
 			}
 
 			if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR {
@@ -634,6 +662,9 @@ done:
 			}
 		}
 	}
+	if dumpIntr {
+		return ErrDumpInterrupted
+	}
 	return nil
 }
 
@@ -656,9 +687,11 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
 }
 
 type NetlinkSocket struct {
-	fd   int32
-	file *os.File
-	lsa  unix.SockaddrNetlink
+	fd             int32
+	file           *os.File
+	lsa            unix.SockaddrNetlink
+	sendTimeout    int64 // Access using atomic.Load/StoreInt64
+	receiveTimeout int64 // Access using atomic.Load/StoreInt64
 	sync.Mutex
 }
 
@@ -802,8 +835,44 @@ func (s *NetlinkSocket) GetFd() int {
 	return int(s.fd)
 }
 
+func (s *NetlinkSocket) GetTimeouts() (send, receive time.Duration) {
+	return time.Duration(atomic.LoadInt64(&s.sendTimeout)),
+		time.Duration(atomic.LoadInt64(&s.receiveTimeout))
+}
+
 func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
-	return unix.Sendto(int(s.fd), request.Serialize(), 0, &s.lsa)
+	rawConn, err := s.file.SyscallConn()
+	if err != nil {
+		return err
+	}
+	var (
+		deadline time.Time
+		innerErr error
+	)
+	sendTimeout := atomic.LoadInt64(&s.sendTimeout)
+	if sendTimeout != 0 {
+		deadline = time.Now().Add(time.Duration(sendTimeout))
+	}
+	if err := s.file.SetWriteDeadline(deadline); err != nil {
+		return err
+	}
+	serializedReq := request.Serialize()
+	err = rawConn.Write(func(fd uintptr) (done bool) {
+		innerErr = unix.Sendto(int(s.fd), serializedReq, 0, &s.lsa)
+		return innerErr != unix.EWOULDBLOCK
+	})
+	if innerErr != nil {
+		return innerErr
+	}
+	if err != nil {
+		// The timeout was previously implemented using SO_SNDTIMEO on a blocking
+		// socket. So, continue to return EAGAIN when the timeout is reached.
+		if errors.Is(err, os.ErrDeadlineExceeded) {
+			return unix.EAGAIN
+		}
+		return err
+	}
+	return nil
 }
 
 func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) {
@@ -812,20 +881,33 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli
 		return nil, nil, err
 	}
 	var (
+		deadline time.Time
 		fromAddr *unix.SockaddrNetlink
 		rb       [RECEIVE_BUFFER_SIZE]byte
 		nr       int
 		from     unix.Sockaddr
 		innerErr error
 	)
+	receiveTimeout := atomic.LoadInt64(&s.receiveTimeout)
+	if receiveTimeout != 0 {
+		deadline = time.Now().Add(time.Duration(receiveTimeout))
+	}
+	if err := s.file.SetReadDeadline(deadline); err != nil {
+		return nil, nil, err
+	}
 	err = rawConn.Read(func(fd uintptr) (done bool) {
 		nr, from, innerErr = unix.Recvfrom(int(fd), rb[:], 0)
 		return innerErr != unix.EWOULDBLOCK
 	})
 	if innerErr != nil {
-		err = innerErr
+		return nil, nil, innerErr
 	}
 	if err != nil {
+		// The timeout was previously implemented using SO_RCVTIMEO on a blocking
+		// socket. So, continue to return EAGAIN when the timeout is reached.
+		if errors.Is(err, os.ErrDeadlineExceeded) {
+			return nil, nil, unix.EAGAIN
+		}
 		return nil, nil, err
 	}
 	fromAddr, ok := from.(*unix.SockaddrNetlink)
@@ -847,16 +929,14 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli
 
 // SetSendTimeout allows to set a send timeout on the socket
 func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error {
-	// Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine
-	// remains stuck on a send on a closed fd
-	return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout)
+	atomic.StoreInt64(&s.sendTimeout, timeout.Nano())
+	return nil
 }
 
 // SetReceiveTimeout allows to set a receive timeout on the socket
 func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error {
-	// Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine
-	// remains stuck on a recvmsg on a closed fd
-	return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout)
+	atomic.StoreInt64(&s.receiveTimeout, timeout.Nano())
+	return nil
 }
 
 // SetReceiveBufferSize allows to set a receive buffer size on the socket
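SetSendTimeout and SetReceiveTimeout no longer touch SO_SNDTIMEO/SO_RCVTIMEO; they record a duration that Send and Receive apply as an os.File deadline, and expiry is translated back to unix.EAGAIN so existing callers see the old errno. A sketch against the nl package (assuming a netlink-capable Linux environment):

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/vishvananda/netlink/nl"
	"golang.org/x/sys/unix"
)

func main() {
	// Subscribe with no multicast groups, so nothing will ever arrive.
	s, err := nl.Subscribe(unix.NETLINK_ROUTE)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	if err := s.SetReceiveTimeout(&unix.Timeval{Sec: 1}); err != nil {
		log.Fatal(err)
	}
	send, recv := s.GetTimeouts()
	fmt.Println("timeouts:", send, recv) // 0s 1s

	if _, _, err := s.Receive(); errors.Is(err, unix.EAGAIN) {
		fmt.Println("receive deadline hit, EAGAIN preserved")
	}
}
```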
diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
index 0720729a9003025dd1630c5a30b5b05ecee7de48..b8f500792bf65c6fb0d42416938c3e1e4b894469 100644
--- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
@@ -115,6 +115,7 @@ const (
 	SizeofTcConnmark     = SizeofTcGen + 0x04
 	SizeofTcCsum         = SizeofTcGen + 0x04
 	SizeofTcMirred       = SizeofTcGen + 0x08
+	SizeofTcVlan         = SizeofTcGen + 0x04
 	SizeofTcTunnelKey    = SizeofTcGen + 0x04
 	SizeofTcSkbEdit      = SizeofTcGen
 	SizeofTcPolice       = 2*SizeofTcRateSpec + 0x20
@@ -816,6 +817,41 @@ func (x *TcMirred) Serialize() []byte {
 	return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:]
 }
 
+const (
+	TCA_VLAN_UNSPEC = iota
+	TCA_VLAN_TM
+	TCA_VLAN_PARMS
+	TCA_VLAN_PUSH_VLAN_ID
+	TCA_VLAN_PUSH_VLAN_PROTOCOL
+	TCA_VLAN_PAD
+	TCA_VLAN_PUSH_VLAN_PRIORITY
+	TCA_VLAN_PUSH_ETH_DST
+	TCA_VLAN_PUSH_ETH_SRC
+	TCA_VLAN_MAX
+)
+
+// struct tc_vlan {
+// 	tc_gen;
+// 	int v_action;
+// };
+
+type TcVlan struct {
+	TcGen
+	Action int32
+}
+
+func (msg *TcVlan) Len() int {
+	return SizeofTcVlan
+}
+
+func DeserializeTcVlan(b []byte) *TcVlan {
+	return (*TcVlan)(unsafe.Pointer(&b[0:SizeofTcVlan][0]))
+}
+
+func (x *TcVlan) Serialize() []byte {
+	return (*(*[SizeofTcVlan]byte)(unsafe.Pointer(x)))[:]
+}
+
 const (
 	TCA_TUNNEL_KEY_UNSPEC = iota
 	TCA_TUNNEL_KEY_TM
@@ -1239,8 +1275,8 @@ const (
 )
 
 // /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It
-//  * means no specific header type - offset is relative to the network layer
-//  */
+//   - means no specific header type - offset is relative to the network layer
+//     */
 type PeditHeaderType uint16
 
 const (
diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
index 1ba25d3cd473febeed91b27ba16c45c92d6d5fe2..aa51e3b47037a1d8ae788a39bca2c8183feb69ca 100644
--- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go
+++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"syscall"
 
@@ -8,10 +9,14 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func LinkGetProtinfo(link Link) (Protinfo, error) {
 	return pkgHandle.LinkGetProtinfo(link)
 }
 
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) {
 	base := link.Attrs()
 	h.ensureIndex(base)
@@ -19,9 +24,9 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP)
 	msg := nl.NewIfInfomsg(unix.AF_BRIDGE)
 	req.AddData(msg)
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, 0)
-	if err != nil {
-		return pi, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return pi, executeErr
 	}
 
 	for _, m := range msgs {
@@ -43,7 +48,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) {
 			}
 			pi = parseProtinfo(infos)
 
-			return pi, nil
+			return pi, executeErr
 		}
 	}
 	return pi, fmt.Errorf("Device with index %d not found", base.Index)
diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
index e732ae3bd642607558e9bfa6ed3ec4c268477c77..22cf0e58257ade1a17d2ab88386265bd5b4fb31a 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"strconv"
@@ -338,6 +339,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
 // QdiscList gets a list of qdiscs in the system.
 // Equivalent to: `tc qdisc show`.
 // The list can be filtered by link.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func QdiscList(link Link) ([]Qdisc, error) {
 	return pkgHandle.QdiscList(link)
 }
@@ -345,6 +349,9 @@ func QdiscList(link Link) ([]Qdisc, error) {
 // QdiscList gets a list of qdiscs in the system.
 // Equivalent to: `tc qdisc show`.
 // The list can be filtered by link.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP)
 	index := int32(0)
@@ -359,9 +366,9 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
 	}
 	req.AddData(msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []Qdisc
@@ -497,7 +504,7 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
 		res = append(res, qdisc)
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 func parsePfifoFastData(qdisc Qdisc, value []byte) error {
diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go
index 036399db6b0c2bebcdb076d29ee7ef6e25294c46..9bb7507321d437363ff4a42486171aaf31a6faaf 100644
--- a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go
@@ -3,6 +3,7 @@ package netlink
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 
@@ -85,19 +86,25 @@ func execRdmaSetLink(req *nl.NetlinkRequest) error {
 
 // RdmaLinkList gets a list of RDMA link devices.
 // Equivalent to: `rdma dev show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func RdmaLinkList() ([]*RdmaLink, error) {
 	return pkgHandle.RdmaLinkList()
 }
 
 // RdmaLinkList gets a list of RDMA link devices.
 // Equivalent to: `rdma dev show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) {
 	proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET)
 	req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP)
 
-	msgs, err := req.Execute(unix.NETLINK_RDMA, 0)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_RDMA, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []*RdmaLink
@@ -109,17 +116,23 @@ func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) {
 		res = append(res, link)
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 // RdmaLinkByName finds a link by name and returns a pointer to the object if
 // found and nil error, otherwise returns error code.
+//
+// If the returned error is [ErrDumpInterrupted], the result may be missing or
+// outdated and the caller should retry.
 func RdmaLinkByName(name string) (*RdmaLink, error) {
 	return pkgHandle.RdmaLinkByName(name)
 }
 
 // RdmaLinkByName finds a link by name and returns a pointer to the object if
 // found and nil error, otherwise returns error code.
+//
+// If the returned error is [ErrDumpInterrupted], the result may be missing or
+// outdated and the caller should retry.
 func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) {
 	links, err := h.RdmaLinkList()
 	if err != nil {
@@ -288,6 +301,8 @@ func RdmaLinkDel(name string) error {
 }
 
 // RdmaLinkDel deletes an rdma link.
+//
+// If the returned error is [ErrDumpInterrupted], the caller should retry.
 func (h *Handle) RdmaLinkDel(name string) error {
 	link, err := h.RdmaLinkByName(name)
 	if err != nil {
@@ -307,6 +322,7 @@ func (h *Handle) RdmaLinkDel(name string) error {
 
 // RdmaLinkAdd adds an rdma link for the specified type to the network device.
 // Similar to: rdma link add NAME type TYPE netdev NETDEV
+//
 //	NAME - specifies the new name of the rdma link to add
 //	TYPE - specifies which rdma type to use.  Link types:
 //		rxe - Soft RoCE driver
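
The RdmaLinkByName comments above say the caller should retry on ErrDumpInterrupted. A hedged sketch of a bounded retry (the attempt count of 3 is an arbitrary choice, not from the library):

package example

import (
	"errors"

	"github.com/vishvananda/netlink"
)

func rdmaLinkByNameRetry(name string) (*netlink.RdmaLink, error) {
	var link *netlink.RdmaLink
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		link, err = netlink.RdmaLinkByName(name)
		if !errors.Is(err, netlink.ErrDumpInterrupted) {
			break // found it, or hit a non-retryable error
		}
	}
	return link, err
}
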
diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go
index 0cd4f8363a7ff3add58dc01615eb253bf4d6d13d..28a132a2f0cf16a0772917bb5336bab2dade9e0c 100644
--- a/vendor/github.com/vishvananda/netlink/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/route_linux.go
@@ -3,6 +3,7 @@ package netlink
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 	"strconv"
@@ -1163,6 +1164,9 @@ func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.R
 // RouteList gets a list of routes in the system.
 // Equivalent to: `ip route show`.
 // The list can be filtered by link and ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func RouteList(link Link, family int) ([]Route, error) {
 	return pkgHandle.RouteList(link, family)
 }
@@ -1170,6 +1174,9 @@ func RouteList(link Link, family int) ([]Route, error) {
 // RouteList gets a list of routes in the system.
 // Equivalent to: `ip route show`.
 // The list can be filtered by link and ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) RouteList(link Link, family int) ([]Route, error) {
 	routeFilter := &Route{}
 	if link != nil {
@@ -1188,6 +1195,9 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e
 
 // RouteListFiltered gets a list of routes in the system filtered with specified rules.
 // All rules must be defined in RouteFilter struct
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
 	var res []Route
 	err := h.RouteListFilteredIter(family, filter, filterMask, func(route Route) (cont bool) {
@@ -1202,17 +1212,22 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64)
 
 // RouteListFilteredIter passes each route that matches the filter to the given iterator func.  Iteration continues
 // until all routes are loaded or the func returns false.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error {
 	return pkgHandle.RouteListFilteredIter(family, filter, filterMask, f)
 }
 
+// RouteListFilteredIter passes each route that matches the filter to the given
+// iterator func. Iteration continues until all routes are loaded or the func
+// returns false.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error {
 	req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP)
 	rtmsg := &nl.RtMsg{}
 	rtmsg.Family = uint8(family)
 
 	var parseErr error
-	err := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool {
+	executeErr := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool {
 		msg := nl.DeserializeRtMsg(m)
 		if family != FAMILY_ALL && msg.Family != uint8(family) {
 			// Ignore routes not matching requested family
@@ -1270,13 +1285,13 @@ func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uin
 		}
 		return f(route)
 	})
-	if err != nil {
-		return err
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return executeErr
 	}
 	if parseErr != nil {
 		return parseErr
 	}
-	return nil
+	return executeErr
 }
 
 // deserializeRoute decodes a binary netlink message into a Route struct
@@ -1684,6 +1699,10 @@ type RouteSubscribeOptions struct {
 // RouteSubscribeWithOptions work like RouteSubscribe but enable to
 // provide additional options to modify the behavior. Currently, the
 // namespace can be provided as well as an error callback.
+//
+// When options.ListExisting is true, options.ErrorCallback may be
+// called with [ErrDumpInterrupted] to indicate that results from
+// the initial dump of routes may be inconsistent or incomplete.
 func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error {
 	if options.Namespace == nil {
 		none := netns.None()
@@ -1743,6 +1762,9 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <
 				continue
 			}
 			for _, m := range msgs {
+				if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil {
+					cberr(ErrDumpInterrupted)
+				}
 				if m.Header.Type == unix.NLMSG_DONE {
 					continue
 				}
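
A hedged sketch of the new callback behavior documented above (RouteSubscribeOptions, ErrorCallback, and ListExisting are the library's own names; the handler body is illustrative):

package example

import (
	"errors"
	"log"

	"github.com/vishvananda/netlink"
)

func watchRoutes(done <-chan struct{}) (<-chan netlink.RouteUpdate, error) {
	ch := make(chan netlink.RouteUpdate)
	opts := netlink.RouteSubscribeOptions{
		ListExisting: true,
		ErrorCallback: func(err error) {
			if errors.Is(err, netlink.ErrDumpInterrupted) {
				// The initial dump raced with a route change; re-list if a
				// complete snapshot matters.
				log.Println("initial route dump interrupted")
			}
		},
	}
	if err := netlink.RouteSubscribeWithOptions(ch, done, opts); err != nil {
		return nil, err
	}
	return ch, nil
}
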
diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go
index ddff99cfad29cad262abeff0e80d3ce2c7b4062d..dba99147b2da68b523da0c9b7ce94b6fb516f82b 100644
--- a/vendor/github.com/vishvananda/netlink/rule_linux.go
+++ b/vendor/github.com/vishvananda/netlink/rule_linux.go
@@ -2,6 +2,7 @@ package netlink
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"net"
 
@@ -183,12 +184,18 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
 
 // RuleList lists rules in the system.
 // Equivalent to: ip rule list
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func RuleList(family int) ([]Rule, error) {
 	return pkgHandle.RuleList(family)
 }
 
 // RuleList lists rules in the system.
 // Equivalent to: ip rule list
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) RuleList(family int) ([]Rule, error) {
 	return h.RuleListFiltered(family, nil, 0)
 }
@@ -196,20 +203,26 @@ func (h *Handle) RuleList(family int) ([]Rule, error) {
 // RuleListFiltered gets a list of rules in the system filtered by the
 // specified rule template `filter`.
 // Equivalent to: ip rule list
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) {
 	return pkgHandle.RuleListFiltered(family, filter, filterMask)
 }
 
 // RuleListFiltered lists rules in the system.
 // Equivalent to: ip rule list
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST)
 	msg := nl.NewIfInfomsg(family)
 	req.AddData(msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res = make([]Rule, 0)
@@ -306,7 +319,7 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) (
 		res = append(res, *rule)
 	}
 
-	return res, nil
+	return res, executeErr
 }
 
 func (pr *RulePortRange) toRtAttrData() []byte {
diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go
index 4eb4aeafbdf779e03d7b797196d136cb99c06ffb..82891bc2e0656fdc41b2314845d555d3fe43088a 100644
--- a/vendor/github.com/vishvananda/netlink/socket_linux.go
+++ b/vendor/github.com/vishvananda/netlink/socket_linux.go
@@ -157,6 +157,9 @@ func (u *UnixSocket) deserialize(b []byte) error {
 }
 
 // SocketGet returns the Socket identified by its local and remote addresses.
+//
+// If the returned error is [ErrDumpInterrupted], the search for a result may
+// be incomplete and the caller should retry.
 func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) {
 	var protocol uint8
 	var localIP, remoteIP net.IP
@@ -232,6 +235,9 @@ func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) {
 }
 
 // SocketGet returns the Socket identified by its local and remote addresses.
+//
+// If the returned error is [ErrDumpInterrupted], the search for a result may
+// be incomplete and the caller should retry.
 func SocketGet(local, remote net.Addr) (*Socket, error) {
 	return pkgHandle.SocketGet(local, remote)
 }
@@ -283,6 +289,9 @@ func SocketDestroy(local, remote net.Addr) error {
 }
 
 // SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) {
 	// Construct the request
 	req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
@@ -295,9 +304,9 @@ func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error)
 
 	// Do the query and parse the result
 	var result []*InetDiagTCPInfoResp
-	var err error
-	err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+	executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
 		sockInfo := &Socket{}
+		var err error
 		if err = sockInfo.deserialize(msg); err != nil {
 			return false
 		}
@@ -315,18 +324,24 @@ func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error)
 		return true
 	})
 
-	if err != nil {
-		return nil, err
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
-	return result, nil
+	return result, executeErr
 }
 
 // SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) {
 	return pkgHandle.SocketDiagTCPInfo(family)
 }
 
 // SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) {
 	// Construct the request
 	req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
@@ -339,27 +354,32 @@ func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) {
 
 	// Do the query and parse the result
 	var result []*Socket
-	var err error
-	err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+	executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
 		sockInfo := &Socket{}
-		if err = sockInfo.deserialize(msg); err != nil {
+		if err := sockInfo.deserialize(msg); err != nil {
 			return false
 		}
 		result = append(result, sockInfo)
 		return true
 	})
-	if err != nil {
-		return nil, err
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
-	return result, nil
+	return result, executeErr
 }
 
 // SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func SocketDiagTCP(family uint8) ([]*Socket, error) {
 	return pkgHandle.SocketDiagTCP(family)
 }
 
 // SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) {
 	// Construct the request
 	var extensions uint8
@@ -377,14 +397,14 @@ func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error)
 
 	// Do the query and parse the result
 	var result []*InetDiagUDPInfoResp
-	var err error
-	err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+	executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
 		sockInfo := &Socket{}
-		if err = sockInfo.deserialize(msg); err != nil {
+		if err := sockInfo.deserialize(msg); err != nil {
 			return false
 		}
 
 		var attrs []syscall.NetlinkRouteAttr
+		var err error
 		if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil {
 			return false
 		}
@@ -397,18 +417,24 @@ func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error)
 		result = append(result, res)
 		return true
 	})
-	if err != nil {
-		return nil, err
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
-	return result, nil
+	return result, executeErr
 }
 
 // SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) {
 	return pkgHandle.SocketDiagUDPInfo(family)
 }
 
 // SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) {
 	// Construct the request
 	req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
@@ -421,27 +447,32 @@ func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) {
 
 	// Do the query and parse the result
 	var result []*Socket
-	var err error
-	err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+	executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
 		sockInfo := &Socket{}
-		if err = sockInfo.deserialize(msg); err != nil {
+		if err := sockInfo.deserialize(msg); err != nil {
 			return false
 		}
 		result = append(result, sockInfo)
 		return true
 	})
-	if err != nil {
-		return nil, err
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
-	return result, nil
+	return result, executeErr
 }
 
 // SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func SocketDiagUDP(family uint8) ([]*Socket, error) {
 	return pkgHandle.SocketDiagUDP(family)
 }
 
 // UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) {
 	// Construct the request
 	var extensions uint8
@@ -456,10 +487,9 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) {
 	})
 
 	var result []*UnixDiagInfoResp
-	var err error
-	err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+	executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
 		sockInfo := &UnixSocket{}
-		if err = sockInfo.deserialize(msg); err != nil {
+		if err := sockInfo.deserialize(msg); err != nil {
 			return false
 		}
 
@@ -469,6 +499,7 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) {
 		}
 
 		var attrs []syscall.NetlinkRouteAttr
+		var err error
 		if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil {
 			return false
 		}
@@ -480,18 +511,24 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) {
 		result = append(result, res)
 		return true
 	})
-	if err != nil {
-		return nil, err
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
-	return result, nil
+	return result, executeErr
 }
 
 // UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) {
 	return pkgHandle.UnixSocketDiagInfo()
 }
 
 // UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) {
 	// Construct the request
 	req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
@@ -501,10 +538,9 @@ func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) {
 	})
 
 	var result []*UnixSocket
-	var err error
-	err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+	executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
 		sockInfo := &UnixSocket{}
-		if err = sockInfo.deserialize(msg); err != nil {
+		if err := sockInfo.deserialize(msg); err != nil {
 			return false
 		}
 
@@ -514,13 +550,16 @@ func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) {
 		}
 		return true
 	})
-	if err != nil {
-		return nil, err
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
-	return result, nil
+	return result, executeErr
 }
 
 // UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func UnixSocketDiag() ([]*UnixSocket, error) {
 	return pkgHandle.UnixSocketDiag()
 }
diff --git a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go
index 20c82f9c7665c4c28bcd860b0f07e7cf9302ff20..c1dd00a8647ac898bc819eea820c20a4c20b4bb5 100644
--- a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go
+++ b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go
@@ -52,8 +52,10 @@ func (s *XDPSocket) deserialize(b []byte) error {
 	return nil
 }
 
-// XDPSocketGet returns the XDP socket identified by its inode number and/or
+// SocketXDPGetInfo returns the XDP socket identified by its inode number and/or
 // socket cookie. Specify the cookie as SOCK_ANY_COOKIE if unknown.
+//
+// If the returned error is [ErrDumpInterrupted], the caller should retry.
 func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) {
 	// We have a problem here: dumping AF_XDP sockets currently does not support
 	// filtering. We thus need to dump all XSKs and then only filter afterwards
@@ -85,6 +87,9 @@ func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) {
 }
 
 // SocketDiagXDP requests XDP_DIAG_INFO for XDP family sockets.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func SocketDiagXDP() ([]*XDPDiagInfoResp, error) {
 	var result []*XDPDiagInfoResp
 	err := socketDiagXDPExecutor(func(m syscall.NetlinkMessage) error {
@@ -105,10 +110,10 @@ func SocketDiagXDP() ([]*XDPDiagInfoResp, error) {
 		result = append(result, res)
 		return nil
 	})
-	if err != nil {
+	if err != nil && !errors.Is(err, ErrDumpInterrupted) {
 		return nil, err
 	}
-	return result, nil
+	return result, err
 }
 
 // socketDiagXDPExecutor requests XDP_DIAG_INFO for XDP family sockets.
@@ -128,6 +133,7 @@ func socketDiagXDPExecutor(receiver func(syscall.NetlinkMessage) error) error {
 		return err
 	}
 
+	dumpIntr := false
 loop:
 	for {
 		msgs, from, err := s.Receive()
@@ -142,6 +148,9 @@ loop:
 		}
 
 		for _, m := range msgs {
+			if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 {
+				dumpIntr = true
+			}
 			switch m.Header.Type {
 			case unix.NLMSG_DONE:
 				break loop
@@ -154,6 +163,9 @@ loop:
 			}
 		}
 	}
+	if dumpIntr {
+		return ErrDumpInterrupted
+	}
 	return nil
 }
 
diff --git a/vendor/github.com/vishvananda/netlink/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/vdpa_linux.go
index 7c15986d0f9dc595ceb5224fd77b9b17610f9cbd..c14877a295d31de93de4b2af3527a615ee178e26 100644
--- a/vendor/github.com/vishvananda/netlink/vdpa_linux.go
+++ b/vendor/github.com/vishvananda/netlink/vdpa_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"syscall"
@@ -118,6 +119,9 @@ func VDPADelDev(name string) error {
 
 // VDPAGetDevList returns list of VDPA devices
 // Equivalent to: `vdpa dev show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func VDPAGetDevList() ([]*VDPADev, error) {
 	return pkgHandle.VDPAGetDevList()
 }
@@ -130,6 +134,9 @@ func VDPAGetDevByName(name string) (*VDPADev, error) {
 
 // VDPAGetDevConfigList returns list of VDPA devices configurations
 // Equivalent to: `vdpa dev config show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func VDPAGetDevConfigList() ([]*VDPADevConfig, error) {
 	return pkgHandle.VDPAGetDevConfigList()
 }
@@ -148,6 +155,9 @@ func VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) {
 
 // VDPAGetMGMTDevList returns list of mgmt devices
 // Equivalent to: `vdpa mgmtdev show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) {
 	return pkgHandle.VDPAGetMGMTDevList()
 }
@@ -261,9 +271,9 @@ func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr)
 		req.AddData(a)
 	}
 
-	resp, err := req.Execute(unix.NETLINK_GENERIC, 0)
-	if err != nil {
-		return nil, err
+	resp, executeErr := req.Execute(unix.NETLINK_GENERIC, 0)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	messages := make([]vdpaNetlinkMessage, 0, len(resp))
 	for _, m := range resp {
@@ -273,10 +283,13 @@ func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr)
 		}
 		messages = append(messages, attrs)
 	}
-	return messages, nil
+	return messages, executeErr
 }
 
 // dump all devices if dev is nil
+//
+// If dev is nil and the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) {
 	var extraFlags int
 	var attrs []*nl.RtAttr
@@ -285,9 +298,9 @@ func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) {
 	} else {
 		extraFlags = extraFlags | unix.NLM_F_DUMP
 	}
-	messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs)
-	if err != nil {
-		return nil, err
+	messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	devs := make([]*VDPADev, 0, len(messages))
 	for _, m := range messages {
@@ -295,10 +308,13 @@ func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) {
 		d.parseAttributes(m)
 		devs = append(devs, d)
 	}
-	return devs, nil
+	return devs, executeErr
 }
 
 // dump all devices if dev is nil
+//
+// If dev is nil and the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) {
 	var extraFlags int
 	var attrs []*nl.RtAttr
@@ -307,9 +323,9 @@ func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) {
 	} else {
 		extraFlags = extraFlags | unix.NLM_F_DUMP
 	}
-	messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs)
-	if err != nil {
-		return nil, err
+	messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	cfgs := make([]*VDPADevConfig, 0, len(messages))
 	for _, m := range messages {
@@ -317,10 +333,13 @@ func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) {
 		cfg.parseAttributes(m)
 		cfgs = append(cfgs, cfg)
 	}
-	return cfgs, nil
+	return cfgs, executeErr
 }
 
 // dump all devices if dev is nil
+//
+// If dev is nil and the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) {
 	var extraFlags int
 	var attrs []*nl.RtAttr
@@ -336,9 +355,9 @@ func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) {
 	} else {
 		extraFlags = extraFlags | unix.NLM_F_DUMP
 	}
-	messages, err := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs)
-	if err != nil {
-		return nil, err
+	messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 	cfgs := make([]*VDPAMGMTDev, 0, len(messages))
 	for _, m := range messages {
@@ -346,7 +365,7 @@ func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) {
 		cfg.parseAttributes(m)
 		cfgs = append(cfgs, cfg)
 	}
-	return cfgs, nil
+	return cfgs, executeErr
 }
 
 // VDPANewDev adds new VDPA device
@@ -385,6 +404,9 @@ func (h *Handle) VDPADelDev(name string) error {
 
 // VDPAGetDevList returns list of VDPA devices
 // Equivalent to: `vdpa dev show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) VDPAGetDevList() ([]*VDPADev, error) {
 	return h.vdpaDevGet(nil)
 }
@@ -404,6 +426,9 @@ func (h *Handle) VDPAGetDevByName(name string) (*VDPADev, error) {
 
 // VDPAGetDevConfigList returns list of VDPA devices configurations
 // Equivalent to: `vdpa dev config show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) VDPAGetDevConfigList() ([]*VDPADevConfig, error) {
 	return h.vdpaDevConfigGet(nil)
 }
@@ -441,6 +466,9 @@ func (h *Handle) VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStat
 
 // VDPAGetMGMTDevList returns list of mgmt devices
 // Equivalent to: `vdpa mgmtdev show`
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) {
 	return h.vdpaMGMTDevGet(nil, nil)
 }
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
index d526739cebf143b55ad484630cac2bcbc850e5ea..bf143a1b13f937376e8248503c52ce4527953636 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"net"
 
@@ -215,6 +216,9 @@ func (h *Handle) XfrmPolicyDel(policy *XfrmPolicy) error {
 // XfrmPolicyList gets a list of xfrm policies in the system.
 // Equivalent to: `ip xfrm policy show`.
 // The list can be filtered by ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
 	return pkgHandle.XfrmPolicyList(family)
 }
@@ -222,15 +226,18 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
 // XfrmPolicyList gets a list of xfrm policies in the system.
 // Equivalent to: `ip xfrm policy show`.
 // The list can be filtered by ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) {
 	req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP)
 
 	msg := nl.NewIfInfomsg(family)
 	req.AddData(msg)
 
-	msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []XfrmPolicy
@@ -243,7 +250,7 @@ func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) {
 			return nil, err
 		}
 	}
-	return res, nil
+	return res, executeErr
 }
 
 // XfrmPolicyGet gets the policy described by the index or selector, if found.
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
index 554f2498c2c5eded59373c8c1565a264d6cf8cff..2f461465148c6ec19b204fbd33975f9dc0d015a1 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -1,6 +1,7 @@
 package netlink
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"time"
@@ -382,6 +383,9 @@ func (h *Handle) XfrmStateDel(state *XfrmState) error {
 // XfrmStateList gets a list of xfrm states in the system.
 // Equivalent to: `ip [-4|-6] xfrm state show`.
 // The list can be filtered by ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func XfrmStateList(family int) ([]XfrmState, error) {
 	return pkgHandle.XfrmStateList(family)
 }
@@ -389,12 +393,15 @@ func XfrmStateList(family int) ([]XfrmState, error) {
 // XfrmStateList gets a list of xfrm states in the system.
 // Equivalent to: `ip xfrm state show`.
 // The list can be filtered by ip family.
+//
+// If the returned error is [ErrDumpInterrupted], results may be inconsistent
+// or incomplete.
 func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) {
 	req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP)
 
-	msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
-	if err != nil {
-		return nil, err
+	msgs, executeErr := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
+	if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) {
+		return nil, executeErr
 	}
 
 	var res []XfrmState
@@ -407,7 +414,7 @@ func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) {
 			return nil, err
 		}
 	}
-	return res, nil
+	return res, executeErr
 }
 
 // XfrmStateGet gets the xfrm state described by the ID, if found.
diff --git a/vendor/github.com/vladimirvivien/gexe/exec/builder.go b/vendor/github.com/vladimirvivien/gexe/exec/builder.go
index c5395c88835167b0d054c6dbd8b5953b7f96ba55..75e27ea78c689bbba84c455ed8d3c33aad2b2425 100644
--- a/vendor/github.com/vladimirvivien/gexe/exec/builder.go
+++ b/vendor/github.com/vladimirvivien/gexe/exec/builder.go
@@ -105,12 +105,14 @@ func (cr *PipedCommandResult) LastProc() *Proc {
 // CommandBuilder is a batch command builder that
 // can execute commands using different execution policies (i.e. serial, piped, concurrent)
 type CommandBuilder struct {
-	cmdPolicy CommandPolicy
-	procs     []*Proc
-	vars      *vars.Variables
-	err       error
-	stdout    io.Writer
-	stderr    io.Writer
+	cmdPolicy  CommandPolicy
+	procs      []*Proc
+	vars       *vars.Variables
+	err        error
+	stdout     io.Writer
+	stderr     io.Writer
+	shellStr   string
+	cmdStrings []string
 }
 
 // CommandsWithContextVars creates a *CommandBuilder with the specified context and session variables.
@@ -118,6 +120,7 @@ type CommandBuilder struct {
 func CommandsWithContextVars(ctx context.Context, variables *vars.Variables, cmds ...string) *CommandBuilder {
 	cb := new(CommandBuilder)
 	cb.vars = variables
+	cb.cmdStrings = cmds
 	for _, cmd := range cmds {
 		cb.procs = append(cb.procs, NewProcWithContextVars(ctx, cmd, variables))
 	}
@@ -175,6 +178,12 @@ func (cb *CommandBuilder) WithWorkDir(dir string) *CommandBuilder {
 	return cb
 }
 
+// WithShell sets the shell to use for all commands
+func (cb *CommandBuilder) WithShell(shell string) *CommandBuilder {
+	cb.shellStr = shell
+	return cb
+}
+
 // Run executes all commands successively and waits for all of the result. The result of each individual
 // command can be accessed from CommandResult.Procs[] after the execution completes. If policy == ExitOnErrPolicy, the
 // execution will stop on the first error encountered, otherwise it will continue. Processes with errors can be accessed
@@ -281,65 +290,6 @@ func (cb *CommandBuilder) Concurr() *CommandResult {
 	return cb.Start()
 }
 
-// Pipe executes each command serially chaining the combinedOutput of previous command to the inputPipe of next command.
-func (cb *CommandBuilder) Pipe() *PipedCommandResult {
-	if cb.err != nil {
-		return &PipedCommandResult{err: cb.err}
-	}
-
-	var result PipedCommandResult
-	procLen := len(cb.procs)
-	if procLen == 0 {
-		return &PipedCommandResult{}
-	}
-
-	// wire last proc to combined output
-	last := procLen - 1
-	result.lastProc = cb.procs[last]
-
-	// setup standard output/err for last proc in pipe
-	result.lastProc.cmd.Stdout = cb.stdout
-	if cb.stdout == nil {
-		result.lastProc.cmd.Stdout = result.lastProc.result
-	}
-
-	result.lastProc.cmd.Stderr = cb.stderr
-	if cb.stderr == nil {
-		result.lastProc.cmd.Stderr = result.lastProc.result
-	}
-
-	result.lastProc.cmd.Stdout = result.lastProc.result
-	for i, p := range cb.procs[:last] {
-		pipeout, err := p.cmd.StdoutPipe()
-		if err != nil {
-			p.err = err
-			return &PipedCommandResult{err: err, errProcs: []*Proc{p}}
-		}
-
-		cb.procs[i+1].cmd.Stdin = pipeout
-	}
-
-	// start each process (but, not wait for result)
-	// to ensure data flow between successive processes start
-	for _, p := range cb.procs {
-		result.procs = append(result.procs, p)
-		if err := p.Start().Err(); err != nil {
-			result.errProcs = append(result.errProcs, p)
-			return &result
-		}
-	}
-
-	// wait and access processes result
-	for _, p := range cb.procs {
-		if err := p.Wait().Err(); err != nil {
-			result.errProcs = append(result.errProcs, p)
-			break
-		}
-	}
-
-	return &result
-}
-
 func (cb *CommandBuilder) runCommand(proc *Proc) error {
 	// setup standard out and standard err
 
diff --git a/vendor/github.com/vladimirvivien/gexe/exec/pipe_unix.go b/vendor/github.com/vladimirvivien/gexe/exec/pipe_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b07a53bc0f0669ec4aee0de1389836dd6ccb4e4
--- /dev/null
+++ b/vendor/github.com/vladimirvivien/gexe/exec/pipe_unix.go
@@ -0,0 +1,81 @@
+//go:build !windows
+
+package exec
+
+import "errors"
+
+// Pipe executes each command serially, chaining the combined output
+// of the previous command to the input pipe of the next command.
+func (cb *CommandBuilder) Pipe() *PipedCommandResult {
+	if cb.err != nil {
+		return &PipedCommandResult{err: cb.err}
+	}
+
+	result := cb.connectProcPipes()
+
+	// check for structural errors
+	if result.err != nil {
+		return result
+	}
+
+	// start each process (but, not wait for result)
+	// to ensure data flow between successive processes start
+	for _, p := range cb.procs {
+		result.procs = append(result.procs, p)
+		if err := p.Start().Err(); err != nil {
+			result.errProcs = append(result.errProcs, p)
+			return result
+		}
+	}
+
+	// wait and access processes result
+	for _, p := range cb.procs {
+		if err := p.Wait().Err(); err != nil {
+			result.errProcs = append(result.errProcs, p)
+			break
+		}
+	}
+
+	return result
+}
+
+// connectProcPipes connects the output of each process to the input of the next process in the chain.
+// It returns a PipedCommandResult containing the connected processes and any errors encountered.
+func (cb *CommandBuilder) connectProcPipes() *PipedCommandResult {
+	var result PipedCommandResult
+
+	procLen := len(cb.procs)
+	if procLen == 0 {
+		return &PipedCommandResult{err: errors.New("no processes to connect")}
+	}
+
+	// wire last proc to combined output
+	last := procLen - 1
+	result.lastProc = cb.procs[last]
+
+	// setup standard output/err of last proc in pipe
+	result.lastProc.cmd.Stdout = cb.stdout
+	if cb.stdout == nil {
+		result.lastProc.cmd.Stdout = result.lastProc.result
+	}
+
+	// Wire standard error of last proc in pipe
+	result.lastProc.cmd.Stderr = cb.stderr
+	if cb.stderr == nil {
+		result.lastProc.cmd.Stderr = result.lastProc.result
+	}
+
+	// setup pipes for inner procs in the pipe chain
+	result.lastProc.cmd.Stdout = result.lastProc.result
+	for i, p := range cb.procs[:last] {
+		pipeout, err := p.cmd.StdoutPipe()
+		if err != nil {
+			p.err = err
+			return &PipedCommandResult{err: err, errProcs: []*Proc{p}}
+		}
+
+		cb.procs[i+1].cmd.Stdin = pipeout
+	}
+
+	return &result
+}
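
A hedged usage sketch for this unix path (Commands is assumed to be the package-level builder constructor, and Err/Result the usual gexe result accessors; see the package tests for authoritative examples):

package example

import (
	"fmt"

	"github.com/vladimirvivien/gexe/exec"
)

func wordCount() {
	// Two real processes; stdout of the first feeds stdin of the second
	// through an OS pipe wired up by connectProcPipes.
	res := exec.Commands("echo hello world", "wc -w").Pipe()
	if res.Err() != nil {
		fmt.Println("pipe failed:", res.Err())
		return
	}
	fmt.Println(res.LastProc().Result())
}
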
diff --git a/vendor/github.com/vladimirvivien/gexe/exec/pipe_windows.go b/vendor/github.com/vladimirvivien/gexe/exec/pipe_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..dde8ca1323fcbae3d3d8cae83e65b71e654580c9
--- /dev/null
+++ b/vendor/github.com/vladimirvivien/gexe/exec/pipe_windows.go
@@ -0,0 +1,80 @@
+//go:build windows
+
+package exec
+
+import (
+	"bytes"
+	"errors"
+	"strings"
+)
+
+// Pipe executes the commands as a single pipeline. Windows does not support
+// OS pipes like {Li|U}nix. Instead, pipes use a single command string, with | delimiters,
+// passed to powershell. So prior to calling Pipe(), call CommandBuilder.WithShell()
+// to specify "powershell.exe -c" as the shell.
+// (See tests for examples.)
+func (cb *CommandBuilder) Pipe() *PipedCommandResult {
+	if cb.err != nil {
+		return &PipedCommandResult{err: cb.err}
+	}
+
+	result := new(PipedCommandResult)
+
+	// setup a single command string with pipe delimiters
+	cmd := strings.Join(cb.cmdStrings, " | ")
+
+	// Prepend shell command if specified
+	if cb.shellStr != "" {
+		cmd = cb.shellStr + " " + cmd
+	}
+
+	proc := NewProcWithVars(cmd, cb.vars)
+	result.procs = append(result.procs, proc)
+	result.lastProc = proc
+
+	// execute the piped commands
+	if err := cb.runCommand(proc); err != nil {
+		return &PipedCommandResult{err: err, errProcs: []*Proc{proc}}
+	}
+
+	return result
+}
+
+// connectProcPipes connects the output of each process to the input of the next process in the chain.
+// It returns a PipedCommandResult containing the connected processes and any errors encountered.
+func (cb *CommandBuilder) connectProcPipes() *PipedCommandResult {
+	var result PipedCommandResult
+
+	procLen := len(cb.procs)
+	if procLen == 0 {
+		return &PipedCommandResult{err: errors.New("no processes to connect")}
+	}
+
+	// wire last proc to combined output
+	last := procLen - 1
+	result.lastProc = cb.procs[last]
+
+	// setup standard output/err for last proc in pipe
+	result.lastProc.cmd.Stdout = cb.stdout
+	if cb.stdout == nil {
+		result.lastProc.cmd.Stdout = result.lastProc.result
+	}
+
+	// Wire standard error of last proc in pipe
+	result.lastProc.cmd.Stderr = cb.stderr
+	if cb.stderr == nil {
+		result.lastProc.cmd.Stderr = result.lastProc.result
+	}
+
+	// exec.Command.StdoutPipe() uses OS pipes, which are not supported on Windows.
+	// Instead, this uses an in-memory buffer and sets the next command's stdin to read from it.
+	result.lastProc.cmd.Stdout = result.lastProc.result
+	for i := range cb.procs[:last] {
+		// Create an in-memory pipe for the command's stdout
+		pipe := new(bytes.Buffer)
+		cb.procs[i].cmd.Stdout = pipe
+		cb.procs[i+1].cmd.Stdin = pipe
+	}
+
+	return &result
+}
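
And the Windows counterpart described by the doc comment above: the builder joins cmdStrings with " | " and prepends the shell set via WithShell, so the whole pipeline runs as one powershell invocation (same assumptions as the previous sketch):

package example

import "github.com/vladimirvivien/gexe/exec"

func topProcesses() *exec.PipedCommandResult {
	// Effectively runs: powershell.exe -c Get-Process | Select-Object -First 5
	return exec.Commands("Get-Process", "Select-Object -First 5").
		WithShell("powershell.exe -c").
		Pipe()
}
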
diff --git a/vendor/github.com/vladimirvivien/gexe/exec/proc.go b/vendor/github.com/vladimirvivien/gexe/exec/proc.go
index f9b6a520edd66dc04021506771b6ec0db1a1a372..65439ee48384d057c92a07543a284b6f3231cd58 100644
--- a/vendor/github.com/vladimirvivien/gexe/exec/proc.go
+++ b/vendor/github.com/vladimirvivien/gexe/exec/proc.go
@@ -10,7 +10,6 @@ import (
 	"os/user"
 	"strconv"
 	"strings"
-	"syscall"
 	"time"
 
 	"github.com/vladimirvivien/gexe/vars"
@@ -189,24 +188,7 @@ func (p *Proc) Start() *Proc {
 	}
 
 	// apply user id and user grp
-	var procCred *syscall.Credential
-	if p.userid != nil {
-		procCred = &syscall.Credential{
-			Uid: uint32(*p.userid),
-		}
-	}
-	if p.groupid != nil {
-		if procCred == nil {
-			procCred = new(syscall.Credential)
-		}
-		procCred.Uid = uint32(*p.groupid)
-	}
-	if procCred != nil {
-		if p.cmd.SysProcAttr == nil {
-			p.cmd.SysProcAttr = new(syscall.SysProcAttr)
-		}
-		p.cmd.SysProcAttr.Credential = procCred
-	}
+	p.applyCredentials()
 
 	if err := p.cmd.Start(); err != nil {
 		p.err = err
diff --git a/vendor/github.com/vladimirvivien/gexe/exec/proc_unix.go b/vendor/github.com/vladimirvivien/gexe/exec/proc_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..cdea947a1b1acf900eaec2408969ae50b2d06ac5
--- /dev/null
+++ b/vendor/github.com/vladimirvivien/gexe/exec/proc_unix.go
@@ -0,0 +1,30 @@
+//go:build !windows
+
+package exec
+
+import (
+	"syscall"
+)
+
+// applyCredentials applies the user and group IDs to the command.
+func (p *Proc) applyCredentials() {
+	// apply user id and user grp
+	var procCred *syscall.Credential
+	if p.userid != nil {
+		procCred = &syscall.Credential{
+			Uid: uint32(*p.userid),
+		}
+	}
+	if p.groupid != nil {
+		if procCred == nil {
+			procCred = new(syscall.Credential)
+		}
+		procCred.Gid = uint32(*p.groupid)
+	}
+	if procCred != nil {
+		if p.cmd.SysProcAttr == nil {
+			p.cmd.SysProcAttr = new(syscall.SysProcAttr)
+		}
+		p.cmd.SysProcAttr.Credential = procCred
+	}
+}
diff --git a/vendor/github.com/vladimirvivien/gexe/exec/proc_windows.go b/vendor/github.com/vladimirvivien/gexe/exec/proc_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..88ad91b9945fad839309f553f7d886c4c4e4c7bf
--- /dev/null
+++ b/vendor/github.com/vladimirvivien/gexe/exec/proc_windows.go
@@ -0,0 +1,9 @@
+//go:build windows
+
+package exec
+
+// applyCredentials is a no-op as this works vastly differently on Windows.
+func (p *Proc) applyCredentials() {
+	// Windows doesn't support user/group IDs in the same way {Li|U}nix does.
+	// Windows impersonation will not be supported in this package at this time.
+}
diff --git a/vendor/github.com/vmware/go-ipfix/pkg/entities/record.go b/vendor/github.com/vmware/go-ipfix/pkg/entities/record.go
index a3a68ab9729e00a2e4b4e71c82a2ac24538f4a87..452551dd3087b4ac78f39f3f4a0c3e3775131360 100644
--- a/vendor/github.com/vmware/go-ipfix/pkg/entities/record.go
+++ b/vendor/github.com/vmware/go-ipfix/pkg/entities/record.go
@@ -46,42 +46,30 @@ type Record interface {
 }
 
 type baseRecord struct {
-	buffer             []byte
 	fieldCount         uint16
 	templateID         uint16
 	orderedElementList []InfoElementWithValue
-	isDecoding         bool
-	len                int
 }
 
 type dataRecord struct {
 	baseRecord
 }
 
-func NewDataRecord(id uint16, numElements, numExtraElements int, isDecoding bool) *dataRecord {
+func NewDataRecord(id uint16, numElements, numExtraElements int) *dataRecord {
 	return &dataRecord{
 		baseRecord{
 			fieldCount:         0,
 			templateID:         id,
-			isDecoding:         isDecoding,
 			orderedElementList: make([]InfoElementWithValue, numElements, numElements+numExtraElements),
 		},
 	}
 }
 
-func NewDataRecordFromElements(id uint16, elements []InfoElementWithValue, isDecoding bool) *dataRecord {
-	length := 0
-	if !isDecoding {
-		for idx := range elements {
-			length += elements[idx].GetLength()
-		}
-	}
+func NewDataRecordFromElements(id uint16, elements []InfoElementWithValue) *dataRecord {
 	return &dataRecord{
 		baseRecord{
 			fieldCount:         uint16(len(elements)),
 			templateID:         id,
-			isDecoding:         isDecoding,
-			len:                length,
 			orderedElementList: elements,
 		},
 	}
@@ -92,35 +80,35 @@ type templateRecord struct {
 	// Minimum data record length required to be sent for this template.
 	// Elements with variable length are considered to be one byte.
 	minDataRecLength uint16
-	// index is used when adding elements to orderedElementList
+	// index is used when adding elements to orderedElementList.
 	index int
+	// buffer is used to marshal the template record.
+	buffer []byte
 }
 
-func NewTemplateRecord(id uint16, numElements int, isDecoding bool) *templateRecord {
+func NewTemplateRecord(id uint16, numElements int) *templateRecord {
 	return &templateRecord{
 		baseRecord{
-			buffer:             make([]byte, 4),
 			fieldCount:         uint16(numElements),
 			templateID:         id,
-			isDecoding:         isDecoding,
 			orderedElementList: make([]InfoElementWithValue, numElements, numElements),
 		},
 		0,
 		0,
+		make([]byte, 4),
 	}
 }
 
-func NewTemplateRecordFromElements(id uint16, elements []InfoElementWithValue, isDecoding bool) *templateRecord {
+func NewTemplateRecordFromElements(id uint16, elements []InfoElementWithValue) *templateRecord {
 	r := &templateRecord{
 		baseRecord{
-			buffer:             make([]byte, 4),
 			fieldCount:         uint16(len(elements)),
 			templateID:         id,
-			isDecoding:         isDecoding,
 			orderedElementList: elements,
 		},
 		0,
 		len(elements),
+		make([]byte, 4),
 	}
 	for idx := range elements {
 		infoElement := elements[idx].GetInfoElement()
@@ -204,18 +192,15 @@ func (d *dataRecord) PrepareRecord() error {
 }
 
 func (d *dataRecord) GetBuffer() ([]byte, error) {
-	if len(d.buffer) == d.len || d.isDecoding {
-		return d.buffer, nil
-	}
-	d.buffer = make([]byte, d.len)
+	buffer := make([]byte, d.GetRecordLength())
 	index := 0
 	for _, element := range d.orderedElementList {
-		if err := encodeInfoElementValueToBuff(element, d.buffer, index); err != nil {
+		if err := encodeInfoElementValueToBuff(element, buffer, index); err != nil {
 			return nil, err
 		}
 		index += element.GetLength()
 	}
-	return d.buffer, nil
+	return buffer, nil
 }
 
 // Callers should ensure that the provided slice has enough capacity (e.g., by calling
@@ -231,13 +216,14 @@ func (d *dataRecord) AppendToBuffer(buffer []byte) ([]byte, error) {
 }
 
 func (d *dataRecord) GetRecordLength() int {
-	return d.len
+	length := 0
+	for _, element := range d.orderedElementList {
+		length += element.GetLength()
+	}
+	return length
 }
 
 func (d *dataRecord) AddInfoElement(element InfoElementWithValue) error {
-	if !d.isDecoding {
-		d.len = d.len + element.GetLength()
-	}
 	if len(d.orderedElementList) <= int(d.fieldCount) {
 		d.orderedElementList = append(d.orderedElementList, element)
 	} else {
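
With the cached buffer and length gone, GetBuffer allocates a fresh slice on every call and GetRecordLength walks the element list. A hedged sketch of the allocation-aware path (assuming both methods are exposed on the entities.Record interface):

package example

import "github.com/vmware/go-ipfix/pkg/entities"

func encodeRecord(rec entities.Record) ([]byte, error) {
	// Size the destination once, then append in place rather than paying
	// for GetBuffer's per-call allocation.
	buf := make([]byte, 0, rec.GetRecordLength())
	return rec.AppendToBuffer(buf)
}
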
diff --git a/vendor/github.com/vmware/go-ipfix/pkg/entities/set.go b/vendor/github.com/vmware/go-ipfix/pkg/entities/set.go
index 6bc72a16b0f5a12a83ddc102b17f97c08370a263..244ea95a35284e8c9a74b1e9fdf10f386bd2fc9f 100644
--- a/vendor/github.com/vmware/go-ipfix/pkg/entities/set.go
+++ b/vendor/github.com/vmware/go-ipfix/pkg/entities/set.go
@@ -148,9 +148,9 @@ func (s *set) AddRecord(elements []InfoElementWithValue, templateID uint16) erro
 func (s *set) AddRecordWithExtraElements(elements []InfoElementWithValue, numExtraElements int, templateID uint16) error {
 	var record Record
 	if s.setType == Data {
-		record = NewDataRecord(templateID, len(elements), numExtraElements, s.isDecoding)
+		record = NewDataRecord(templateID, len(elements), numExtraElements)
 	} else if s.setType == Template {
-		record = NewTemplateRecord(templateID, len(elements), s.isDecoding)
+		record = NewTemplateRecord(templateID, len(elements))
 		err := record.PrepareRecord()
 		if err != nil {
 			return err
@@ -172,9 +172,9 @@ func (s *set) AddRecordWithExtraElements(elements []InfoElementWithValue, numExt
 func (s *set) AddRecordV2(elements []InfoElementWithValue, templateID uint16) error {
 	var record Record
 	if s.setType == Data {
-		record = NewDataRecordFromElements(templateID, elements, s.isDecoding)
+		record = NewDataRecordFromElements(templateID, elements)
 	} else if s.setType == Template {
-		record = NewTemplateRecordFromElements(templateID, elements, s.isDecoding)
+		record = NewTemplateRecordFromElements(templateID, elements)
 		err := record.PrepareRecord()
 		if err != nil {
 			return err
diff --git a/vendor/github.com/vmware/go-ipfix/pkg/exporter/buffered.go b/vendor/github.com/vmware/go-ipfix/pkg/exporter/buffered.go
index f4272531f4f66818c751d698babae89cf8a81dca..58032b6993c17074c8af7495995eba836d9f3984 100644
--- a/vendor/github.com/vmware/go-ipfix/pkg/exporter/buffered.go
+++ b/vendor/github.com/vmware/go-ipfix/pkg/exporter/buffered.go
@@ -186,7 +186,6 @@ func encodeSetHeader(buf []byte, templateID, length uint16) {
 
 func (m *bufferedMessage) sendMessage() (int, error) {
 	now := time.Now()
-	m.ep.seqNumber = m.ep.seqNumber + uint32(m.numRecords)
 	msgLen := len(m.buffer)
 	encodeMessageHeader(m.buffer, 10, uint16(msgLen), uint32(now.Unix()), m.ep.seqNumber, m.ep.obsDomainID)
 	encodeSetHeader(m.buffer[entities.MsgHeaderLength:], m.templateID, uint16(msgLen-entities.MsgHeaderLength))
@@ -194,6 +193,7 @@ func (m *bufferedMessage) sendMessage() (int, error) {
 	if err != nil {
 		return n, err
 	}
+	m.ep.seqNumber = m.ep.seqNumber + uint32(m.numRecords)
 	m.reset()
 	return n, nil
 }
diff --git a/vendor/github.com/vmware/go-ipfix/pkg/exporter/process.go b/vendor/github.com/vmware/go-ipfix/pkg/exporter/process.go
index 958df804658dfdb846f2f5e572311800849e2275..24591165a0d829cab19a239d64e8de17f335b87d 100644
--- a/vendor/github.com/vmware/go-ipfix/pkg/exporter/process.go
+++ b/vendor/github.com/vmware/go-ipfix/pkg/exporter/process.go
@@ -451,9 +451,6 @@ func (ep *ExportingProcess) NewTemplateID() uint16 {
 // createAndSendIPFIXMsg takes in a set as input, creates the IPFIX message, and sends it out.
 // TODO: This method will change when we support sending multiple sets.
 func (ep *ExportingProcess) createAndSendIPFIXMsg(set entities.Set, buf *bytes.Buffer) (int, error) {
-	if set.GetSetType() == entities.Data {
-		ep.seqNumber = ep.seqNumber + set.GetNumberOfRecords()
-	}
 	n, err := WriteIPFIXMsgToBuffer(set, ep.obsDomainID, ep.seqNumber, time.Now(), buf)
 	if err != nil {
 		return 0, err
@@ -471,6 +468,10 @@ func (ep *ExportingProcess) createAndSendIPFIXMsg(set entities.Set, buf *bytes.B
 		return bytesSent, fmt.Errorf("could not send the complete message on the connection")
 	}
 
+	if set.GetSetType() == entities.Data {
+		ep.seqNumber = ep.seqNumber + set.GetNumberOfRecords()
+	}
+
 	return bytesSent, nil
 }
 
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
index 38f564e2b36ae3a8693e625e7ac8bf429ba31d03..6f87f33fa9555d33dbd48678e747abcf68ead1a7 100644
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -4,6 +4,33 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.11.0] - 2023-05-02
+### Fixed
+- Fix initialization of `Value` wrappers.
+
+### Added
+- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print
+  underlying values of pointers.
+
+[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0
+
+## [1.10.0] - 2022-08-11
+### Added
+- Add `atomic.Float32` type for atomic operations on `float32`.
+- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
+  and `atomic.Value`.
+- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
+  type. This is present only for Go 1.18 or higher, and is a drop-in
+  replacement for the standard library's `sync/atomic.Pointer` type.
+
+### Changed
+- Deprecate `CAS` methods on all types in favor of corresponding
+  `CompareAndSwap` methods.
+
+Thanks to @eNV25 and @icpd for their contributions to this release.
+
+[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
+
 ## [1.9.0] - 2021-07-15
 ### Added
 - Add `Float64.Swap` to match int atomic operations.
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
index 209df7bbcd22aa6da61d87845b6c3d13e0ef29cb..f0a2ddd148c8026523a2afe4f7b4a1c701236908 100644
--- a/vendor/go.uber.org/atomic/bool.go
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicwrapper.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -55,8 +55,15 @@ func (x *Bool) Store(val bool) {
 }
 
 // CAS is an atomic compare-and-swap for bool values.
+//
+// Deprecated: Use CompareAndSwap.
 func (x *Bool) CAS(old, new bool) (swapped bool) {
-	return x.v.CAS(boolToInt(old), boolToInt(new))
+	return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for bool values.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+	return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
 }
 
 // Swap atomically stores the given bool and returns the old
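
A minimal migration sketch for the deprecation above: existing CAS call sites keep working through the delegating wrapper, but new code should call CompareAndSwap directly.

package example

import "go.uber.org/atomic"

func claimOnce(ready *atomic.Bool) bool {
	// Only the first caller flips false -> true; everyone else gets false.
	return ready.CompareAndSwap(false, true)
}
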
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
index 207594f5e806c785b901a55fa7b8c46e97382c2c..7c23868fc872200d6c22b54b900024ccbccf2f7b 100644
--- a/vendor/go.uber.org/atomic/duration.go
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicwrapper.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -56,8 +56,15 @@ func (x *Duration) Store(val time.Duration) {
 }
 
 // CAS is an atomic compare-and-swap for time.Duration values.
+//
+// Deprecated: Use CompareAndSwap.
 func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
-	return x.v.CAS(int64(old), int64(new))
+	return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
+	return x.v.CompareAndSwap(int64(old), int64(new))
 }
 
 // Swap atomically stores the given time.Duration and returns the old
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
index 3be19c35ee753da3c71017979b2782e64f5fd5f3..b7e3f1291a34a177f5708cb99b629f585fe55599 100644
--- a/vendor/go.uber.org/atomic/error.go
+++ b/vendor/go.uber.org/atomic/error.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicwrapper.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -49,3 +49,24 @@ func (x *Error) Load() error {
 func (x *Error) Store(val error) {
 	x.v.Store(packError(val))
 }
+
+// CompareAndSwap is an atomic compare-and-swap for error values.
+func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
+	if x.v.CompareAndSwap(packError(old), packError(new)) {
+		return true
+	}
+
+	if old == _zeroError {
+		// If the old value is the empty value, then it's possible the
+		// underlying Value hasn't been set and is nil, so retry with nil.
+		return x.v.CompareAndSwap(nil, packError(new))
+	}
+
+	return false
+}
+
+// Swap atomically stores the given error and returns the old
+// value.
+func (x *Error) Swap(val error) (old error) {
+	return unpackError(x.v.Swap(packError(val)))
+}
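
A short sketch of the zero-value subtlety the new method handles: on an Error that has never been stored to, the wrapped Value is still nil, so CompareAndSwap(nil, ...) only succeeds because of the retry against nil above.

package example

import (
	"errors"

	"go.uber.org/atomic"
)

func recordFirst(slot *atomic.Error) error {
	// Works even on a zero-valued slot, thanks to the nil retry.
	if slot.CompareAndSwap(nil, errors.New("boom")) {
		return nil
	}
	// Lost the race; atomically take and clear whatever won.
	return slot.Swap(nil)
}
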
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
index ffe0be21cb0174a02da635fc1505fb88026659a2..d31fb633bb635b3c34cef1638a22fdd7fb9abb26 100644
--- a/vendor/go.uber.org/atomic/error_ext.go
+++ b/vendor/go.uber.org/atomic/error_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,7 @@ package atomic
 // atomic.Value panics on nil inputs, or if the underlying type changes.
 // Stabilize by always storing a custom struct that we control.
 
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
 
 type packedError struct{ Value error }
 
diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go
new file mode 100644
index 0000000000000000000000000000000000000000..62c36334fd5c275aee080e8928a21d299a7ab5da
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float32.go
@@ -0,0 +1,77 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+	"encoding/json"
+	"math"
+)
+
+// Float32 is an atomic type-safe wrapper for float32 values.
+type Float32 struct {
+	_ nocmp // disallow non-atomic comparison
+
+	v Uint32
+}
+
+var _zeroFloat32 float32
+
+// NewFloat32 creates a new Float32.
+func NewFloat32(val float32) *Float32 {
+	x := &Float32{}
+	if val != _zeroFloat32 {
+		x.Store(val)
+	}
+	return x
+}
+
+// Load atomically loads the wrapped float32.
+func (x *Float32) Load() float32 {
+	return math.Float32frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float32.
+func (x *Float32) Store(val float32) {
+	x.v.Store(math.Float32bits(val))
+}
+
+// Swap atomically stores the given float32 and returns the old
+// value.
+func (x *Float32) Swap(val float32) (old float32) {
+	return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
+}
+
+// MarshalJSON encodes the wrapped float32 into JSON.
+func (x *Float32) MarshalJSON() ([]byte, error) {
+	return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float32 from JSON.
+func (x *Float32) UnmarshalJSON(b []byte) error {
+	var v float32
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+	x.Store(v)
+	return nil
+}
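`Float32` follows the same generated-wrapper pattern as the existing `Float64`: the IEEE-754 bit pattern is held in an atomic `Uint32`, with `math.Float32bits`/`math.Float32frombits` translating at the boundary, plus generated JSON round-tripping. An illustrative usage sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"math"

	"go.uber.org/atomic"
)

func main() {
	f := atomic.NewFloat32(1.5)

	// Under the hood the value lives as raw IEEE-754 bits in a Uint32.
	fmt.Printf("%#08x\n", math.Float32bits(f.Load())) // 0x3fc00000

	f.Store(-0.25)
	old := f.Swap(3)
	fmt.Println(old, f.Load()) // -0.25 3

	// The generated MarshalJSON/UnmarshalJSON round-trip the wrapped value.
	b, _ := json.Marshal(f)
	fmt.Println(string(b)) // 3
}
```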
diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go
new file mode 100644
index 0000000000000000000000000000000000000000..b0cd8d9c820af565972f93ba4786fedc654235a5
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float32_ext.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+	"math"
+	"strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
+
+// Add atomically adds to the wrapped float32 and returns the new value.
+func (f *Float32) Add(delta float32) float32 {
+	for {
+		old := f.Load()
+		new := old + delta
+		if f.CAS(old, new) {
+			return new
+		}
+	}
+}
+
+// Sub atomically subtracts from the wrapped float32 and returns the new value.
+func (f *Float32) Sub(delta float32) float32 {
+	return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float32 values.
+//
+// Deprecated: Use CompareAndSwap.
+func (f *Float32) CAS(old, new float32) (swapped bool) {
+	return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float32 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's built-in
+// operators, but CompareAndSwap allows a stored NaN to compare equal to a
+// passed-in NaN. This keeps typical CompareAndSwap loops from blocking forever, e.g.,
+//
+//	for {
+//	  old := atom.Load()
+//	  new = f(old)
+//	  if atom.CompareAndSwap(old, new) {
+//	    break
+//	  }
+//	}
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
+func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
+	return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float32) String() string {
+	// 'g' is the behavior for floats with %v.
+	return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
+}
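The NaN note is easiest to see in code: Go's `==` never matches NaN, but the bitwise comparison inside `CompareAndSwap` matches a stored NaN against a passed-in NaN with the same bit pattern (NaNs with different payload bits would still miss, an edge the comment glosses over). A short demonstration sketch:

```go
package main

import (
	"fmt"
	"math"

	"go.uber.org/atomic"
)

func main() {
	nan := float32(math.NaN())

	f := atomic.NewFloat32(nan)

	// With Go's operators a NaN never equals anything, itself included...
	fmt.Println(nan == nan) // false

	// ...but CompareAndSwap compares IEEE-754 bit patterns, so a stored
	// NaN matches a passed-in NaN with the same bits, and the loop from
	// the doc comment can terminate.
	fmt.Println(f.CompareAndSwap(nan, 1)) // true
	fmt.Println(f.Load())                 // 1
}
```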
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
index 8a1367184721f989209a8b336a0aa785f3b77d86..5bc11caabe20cfeeb8bdb1ae6531fca01440cd21 100644
--- a/vendor/go.uber.org/atomic/float64.go
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicwrapper.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
index df36b0107f0be7dc61ad48e650480e110a51783a..48c52b0abf664eabd62f0c18dc5d93d72068973d 100644
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -45,21 +45,28 @@ func (f *Float64) Sub(delta float64) float64 {
 
 // CAS is an atomic compare-and-swap for float64 values.
 //
-// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
-// but CAS allows a stored NaN to compare equal to a passed in NaN.
-// This avoids typical CAS loops from blocking forever, e.g.,
+// Deprecated: Use CompareAndSwap.
+func (f *Float64) CAS(old, new float64) (swapped bool) {
+	return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float64 values.
 //
-//   for {
-//     old := atom.Load()
-//     new = f(old)
-//     if atom.CAS(old, new) {
-//       break
-//     }
-//   }
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's built-in
+// operators, but CompareAndSwap allows a stored NaN to compare equal to a
+// passed-in NaN. This keeps typical CompareAndSwap loops from blocking forever, e.g.,
 //
-// If CAS did not match NaN to match, then the above would loop forever.
-func (f *Float64) CAS(old, new float64) (swapped bool) {
-	return f.v.CAS(math.Float64bits(old), math.Float64bits(new))
+//	for {
+//	  old := atom.Load()
+//	  new = f(old)
+//	  if atom.CompareAndSwap(old, new) {
+//	    break
+//	  }
+//	}
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
+func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
+	return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
 }
 
 // String encodes the wrapped value as a string.
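The doc comment's retry loop, made concrete; `scale` is a hypothetical helper showing the pattern that the NaN-matching behavior is designed to keep from livelocking:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// scale multiplies the wrapped value atomically using the retry loop from
// the doc comment above; the function itself is only an illustration.
func scale(f *atomic.Float64, by float64) float64 {
	for {
		old := f.Load()
		new := old * by
		if f.CompareAndSwap(old, new) {
			return new
		}
	}
}

func main() {
	f := atomic.NewFloat64(2.5)
	fmt.Println(scale(f, 4)) // 10
	fmt.Println(f.Load())    // 10
}
```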
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
index 640ea36a175f8ee20272b58b60955bcc71eeaa97..5320eac10f1efef8deface7dd1b233c8fba0e65c 100644
--- a/vendor/go.uber.org/atomic/int32.go
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicint.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -66,7 +66,14 @@ func (i *Int32) Dec() int32 {
 }
 
 // CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
 func (i *Int32) CAS(old, new int32) (swapped bool) {
+	return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
 	return atomic.CompareAndSwapInt32(&i.v, old, new)
 }
 
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
index 9ab66b98091fb97cb20ef717e3ce3061ac776fe1..460821d009deee26fdaaebf5d56e5a459891a278 100644
--- a/vendor/go.uber.org/atomic/int64.go
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicint.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -66,7 +66,14 @@ func (i *Int64) Dec() int64 {
 }
 
 // CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
 func (i *Int64) CAS(old, new int64) (swapped bool) {
+	return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
 	return atomic.CompareAndSwapInt64(&i.v, old, new)
 }
 
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
index a8201cb4a18ef74d9993709fa2bef07008556812..54b74174abdf9d3fb44644b1a16ae2a0b4abdf6d 100644
--- a/vendor/go.uber.org/atomic/nocmp.go
+++ b/vendor/go.uber.org/atomic/nocmp.go
@@ -23,13 +23,13 @@ package atomic
 // nocmp is an uncomparable struct. Embed this inside another struct to make
 // it uncomparable.
 //
-//  type Foo struct {
-//    nocmp
-//    // ...
-//  }
+//	type Foo struct {
+//	  nocmp
+//	  // ...
+//	}
 //
 // This DOES NOT:
 //
-//  - Disallow shallow copies of structs
-//  - Disallow comparison of pointers to uncomparable structs
+//   - Disallow shallow copies of structs
+//   - Disallow comparison of pointers to uncomparable structs
 type nocmp [0]func()
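`nocmp` works because a zero-length array of an uncomparable element type (`func()`) adds no size yet removes the `==` operator from any struct that embeds it, turning accidental struct comparison into a compile error. A self-contained sketch of the pattern:

```go
package main

import "fmt"

// nocmp-style guard: a zero-length array of an uncomparable element type
// adds no size but removes == from any struct that contains it.
type nocmp [0]func()

type Foo struct {
	_ nocmp
	n int
}

func main() {
	a, b := Foo{n: 1}, Foo{n: 1}

	// a == b // compile error: invalid operation: a == b (struct containing nocmp cannot be compared)

	// Pointer comparison and field-by-field comparison still work, which
	// is exactly the "DOES NOT" list in the doc comment above.
	fmt.Println(&a == &b)   // false
	fmt.Println(a.n == b.n) // true
}
```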
diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go
new file mode 100644
index 0000000000000000000000000000000000000000..1fb6c03b26127a55f787db3048d3900f47077bca
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go118.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18
+// +build go1.18
+
+package atomic
+
+import "fmt"
+
+// String returns a human-readable representation of a Pointer's underlying value.
+func (p *Pointer[T]) String() string {
+	return fmt.Sprint(p.Load())
+}
diff --git a/vendor/go.uber.org/atomic/pointer_go118_pre119.go b/vendor/go.uber.org/atomic/pointer_go118_pre119.go
new file mode 100644
index 0000000000000000000000000000000000000000..e0f47dba46862f09bd79d3bd7b6b365aae11ad81
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go118_pre119.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18 && !go1.19
+// +build go1.18,!go1.19
+
+package atomic
+
+import "unsafe"
+
+type Pointer[T any] struct {
+	_ nocmp // disallow non-atomic comparison
+	p UnsafePointer
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+	var p Pointer[T]
+	if v != nil {
+		p.p.Store(unsafe.Pointer(v))
+	}
+	return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+	return (*T)(p.p.Load())
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+	p.p.Store(unsafe.Pointer(val))
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+	return (*T)(p.p.Swap(unsafe.Pointer(val)))
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+	return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
+}
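On go1.18, generics exist but `sync/atomic.Pointer` does not, so this variant type-erases through the package's `UnsafePointer` and restores `*T` on the way out. A standalone sketch of the same technique over plain `sync/atomic` (the `miniPointer` name is illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// miniPointer mirrors the pre-go1.19 technique above: type-erase to
// unsafe.Pointer for the atomic operations, recover *T on the way out.
type miniPointer[T any] struct {
	p unsafe.Pointer
}

func (m *miniPointer[T]) Load() *T   { return (*T)(atomic.LoadPointer(&m.p)) }
func (m *miniPointer[T]) Store(v *T) { atomic.StorePointer(&m.p, unsafe.Pointer(v)) }

func main() {
	var p miniPointer[string]
	s := "hello"
	p.Store(&s)
	fmt.Println(*p.Load()) // hello
}
```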
diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go
new file mode 100644
index 0000000000000000000000000000000000000000..6726f17ad64f8cf22e9591f2dbfbf3b1773d720b
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go119.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.19
+// +build go1.19
+
+package atomic
+
+import "sync/atomic"
+
+// Pointer is an atomic pointer of type *T.
+type Pointer[T any] struct {
+	_ nocmp // disallow non-atomic comparison
+	p atomic.Pointer[T]
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+	var p Pointer[T]
+	if v != nil {
+		p.p.Store(v)
+	}
+	return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+	return p.p.Load()
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+	p.p.Store(val)
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+	return p.p.Swap(val)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+	return p.p.CompareAndSwap(old, new)
+}
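On go1.19 and later the wrapper simply delegates to `sync/atomic.Pointer[T]`, keeping only the `nocmp` guard on top. Typical usage with the vendored package, hot-swapping an immutable snapshot (the `config` type is hypothetical):

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// config is a hypothetical immutable snapshot that gets swapped wholesale.
type config struct{ limit int }

func main() {
	p := atomic.NewPointer(&config{limit: 10})

	// Readers take a consistent snapshot; writers publish a new one.
	old := p.Load()
	p.CompareAndSwap(old, &config{limit: 20})

	fmt.Println(p.Load().limit) // 20
}
```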
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
index 80df93d0949d59b0465938577ac1c613985d61c5..061466c5bde794b0511d46aff50e77539b994d3d 100644
--- a/vendor/go.uber.org/atomic/string.go
+++ b/vendor/go.uber.org/atomic/string.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicwrapper.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -42,13 +42,31 @@ func NewString(val string) *String {
 
 // Load atomically loads the wrapped string.
 func (x *String) Load() string {
-	if v := x.v.Load(); v != nil {
-		return v.(string)
-	}
-	return _zeroString
+	return unpackString(x.v.Load())
 }
 
 // Store atomically stores the passed string.
 func (x *String) Store(val string) {
-	x.v.Store(val)
+	x.v.Store(packString(val))
+}
+
+// CompareAndSwap is an atomic compare-and-swap for string values.
+func (x *String) CompareAndSwap(old, new string) (swapped bool) {
+	if x.v.CompareAndSwap(packString(old), packString(new)) {
+		return true
+	}
+
+	if old == _zeroString {
+		// If the old value is the empty value, then it's possible the
+		// underlying Value hasn't been set and is nil, so retry with nil.
+		return x.v.CompareAndSwap(nil, packString(new))
+	}
+
+	return false
+}
+
+// Swap atomically stores the given string and returns the old
+// value.
+func (x *String) Swap(val string) (old string) {
+	return unpackString(x.v.Swap(packString(val)))
 }
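With values now routed through `packString`/`unpackString`, `String` gains `CompareAndSwap` and `Swap`, with the same empty-value retry as `Error`: a zero-value `String` holds untyped nil underneath, so `""` is treated as the expected old value. A short sketch:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	// A zero-value String has a nil underlying Value; the retry against
	// nil in CompareAndSwap above makes "" behave as the old value.
	var s atomic.String
	fmt.Println(s.CompareAndSwap("", "ready")) // true

	// Swap is available now that stored values are packed consistently.
	fmt.Println(s.Swap("done")) // ready
	fmt.Println(s.Load())       // done
}
```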
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
index 83d92edafc71498ce95b8a3851abd2065298806f..019109c86ba932e6952e6de0d0e7abd38be01d4d 100644
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -20,9 +20,18 @@
 
 package atomic
 
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
-// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which
-// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go
+
+func packString(s string) interface{} {
+	return s
+}
+
+func unpackString(v interface{}) string {
+	if s, ok := v.(string); ok {
+		return s
+	}
+	return ""
+}
 
 // String returns the wrapped value.
 func (s *String) String() string {
diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go
index 33460fc37eaee9f68127d81c454ed1facd5e7d7c..cc2a230c001878f23564743130491c61d1d436b8 100644
--- a/vendor/go.uber.org/atomic/time.go
+++ b/vendor/go.uber.org/atomic/time.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicwrapper.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
index 7859a9cc3b5b85f7aeb0fc8fe0ce9afbc35bc089..4adc294ac2a02fa4208ef5f9fe1e567b48aa0e46 100644
--- a/vendor/go.uber.org/atomic/uint32.go
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicint.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -66,7 +66,14 @@ func (i *Uint32) Dec() uint32 {
 }
 
 // CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
 func (i *Uint32) CAS(old, new uint32) (swapped bool) {
+	return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
 	return atomic.CompareAndSwapUint32(&i.v, old, new)
 }
 
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
index 2f2a7db6380fa603bda67e869da106d4e3bb23c4..0e2eddb30389841df7cf381f4dacf2dedb0d979a 100644
--- a/vendor/go.uber.org/atomic/uint64.go
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicint.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -66,7 +66,14 @@ func (i *Uint64) Dec() uint64 {
 }
 
 // CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
 func (i *Uint64) CAS(old, new uint64) (swapped bool) {
+	return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
 	return atomic.CompareAndSwapUint64(&i.v, old, new)
 }
 
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
index ecf7a77273a1f4b1d5fa60a3c09f15ba65800634..7d5b000d6102d5c239b08f2c10672490ddcb60df 100644
--- a/vendor/go.uber.org/atomic/uintptr.go
+++ b/vendor/go.uber.org/atomic/uintptr.go
@@ -1,6 +1,6 @@
 // @generated Code generated by gen-atomicint.
 
-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -66,7 +66,14 @@ func (i *Uintptr) Dec() uintptr {
 }
 
 // CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
 func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
+	return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
 	return atomic.CompareAndSwapUintptr(&i.v, old, new)
 }
 
diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go
index 169f793dcf39933c5512ad87f70aab0d3174972f..34868baf6a85389c48bce81a5036be9ad6e60415 100644
--- a/vendor/go.uber.org/atomic/unsafe_pointer.go
+++ b/vendor/go.uber.org/atomic/unsafe_pointer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
+// Copyright (c) 2021-2022 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -53,6 +53,13 @@ func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
 }
 
 // CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
 func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
+	return p.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
 	return atomic.CompareAndSwapPointer(&p.v, old, new)
 }
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
index 671f3a382475b9e7981501a7c9a14ff4de8affb3..52caedb9a58fcc2a90b6661a3b38b745f561894e 100644
--- a/vendor/go.uber.org/atomic/value.go
+++ b/vendor/go.uber.org/atomic/value.go
@@ -25,7 +25,7 @@ import "sync/atomic"
 // Value shadows the type of the same name from sync/atomic
 // https://godoc.org/sync/atomic#Value
 type Value struct {
-	atomic.Value
-
 	_ nocmp // disallow non-atomic comparison
+
+	atomic.Value
 }
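Reordering the fields looks cosmetic but is plausibly a size fix: Go pads a zero-size field when it is the last field of a struct (so a pointer to it cannot point one past the allocation), so putting `nocmp` first keeps the wrapper the same size as the embedded `atomic.Value`. A sketch that checks that assumption with `unsafe.Sizeof`; the printed sizes assume a 64-bit platform:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type guard [0]func()

// guard last: the trailing zero-size field gets padded, growing the struct.
type valueLast struct {
	v atomic.Value
	_ guard
}

// guard first, as in the change above: no padding is needed.
type valueFirst struct {
	_ guard
	v atomic.Value
}

func main() {
	// On 64-bit platforms this typically prints: 24 16
	fmt.Println(unsafe.Sizeof(valueLast{}), unsafe.Sizeof(valueFirst{}))
}
```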
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go
index 2492f796af97f10799401c77fb403ae07c74eaad..d25979d9f5330e6fe197f2159ce8687db446a8be 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -234,7 +234,7 @@ func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
 	// Identifiers with the low five bits set indicate high-tag-number format
 	// (two or more octets), which we don't support.
 	if tag&0x1f == 0x1f {
-		b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag)
+		b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag)
 		return
 	}
 	b.AddUint8(uint8(tag))
diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
index 2a938864cb9d3e88b3259c4856412517a39dce5f..b460e6f722b9b6f8699af31c30b92f73c829036e 100644
--- a/vendor/golang.org/x/net/html/atom/table.go
+++ b/vendor/golang.org/x/net/html/atom/table.go
@@ -11,23 +11,23 @@ const (
 	AcceptCharset             Atom = 0x1a0e
 	Accesskey                 Atom = 0x2c09
 	Acronym                   Atom = 0xaa07
-	Action                    Atom = 0x27206
-	Address                   Atom = 0x6f307
+	Action                    Atom = 0x26506
+	Address                   Atom = 0x6f107
 	Align                     Atom = 0xb105
-	Allowfullscreen           Atom = 0x2080f
+	Allowfullscreen           Atom = 0x3280f
 	Allowpaymentrequest       Atom = 0xc113
 	Allowusermedia            Atom = 0xdd0e
 	Alt                       Atom = 0xf303
 	Annotation                Atom = 0x1c90a
 	AnnotationXml             Atom = 0x1c90e
-	Applet                    Atom = 0x31906
-	Area                      Atom = 0x35604
-	Article                   Atom = 0x3fc07
+	Applet                    Atom = 0x30806
+	Area                      Atom = 0x35004
+	Article                   Atom = 0x3f607
 	As                        Atom = 0x3c02
 	Aside                     Atom = 0x10705
 	Async                     Atom = 0xff05
 	Audio                     Atom = 0x11505
-	Autocomplete              Atom = 0x2780c
+	Autocomplete              Atom = 0x26b0c
 	Autofocus                 Atom = 0x12109
 	Autoplay                  Atom = 0x13c08
 	B                         Atom = 0x101
@@ -43,34 +43,34 @@ const (
 	Br                        Atom = 0x202
 	Button                    Atom = 0x19106
 	Canvas                    Atom = 0x10306
-	Caption                   Atom = 0x23107
-	Center                    Atom = 0x22006
-	Challenge                 Atom = 0x29b09
+	Caption                   Atom = 0x22407
+	Center                    Atom = 0x21306
+	Challenge                 Atom = 0x28e09
 	Charset                   Atom = 0x2107
-	Checked                   Atom = 0x47907
+	Checked                   Atom = 0x5b507
 	Cite                      Atom = 0x19c04
-	Class                     Atom = 0x56405
-	Code                      Atom = 0x5c504
+	Class                     Atom = 0x55805
+	Code                      Atom = 0x5ee04
 	Col                       Atom = 0x1ab03
 	Colgroup                  Atom = 0x1ab08
 	Color                     Atom = 0x1bf05
 	Cols                      Atom = 0x1c404
 	Colspan                   Atom = 0x1c407
 	Command                   Atom = 0x1d707
-	Content                   Atom = 0x58b07
-	Contenteditable           Atom = 0x58b0f
-	Contextmenu               Atom = 0x3800b
+	Content                   Atom = 0x57b07
+	Contenteditable           Atom = 0x57b0f
+	Contextmenu               Atom = 0x37a0b
 	Controls                  Atom = 0x1de08
-	Coords                    Atom = 0x1ea06
-	Crossorigin               Atom = 0x1fb0b
-	Data                      Atom = 0x4a504
-	Datalist                  Atom = 0x4a508
-	Datetime                  Atom = 0x2b808
-	Dd                        Atom = 0x2d702
+	Coords                    Atom = 0x1f006
+	Crossorigin               Atom = 0x1fa0b
+	Data                      Atom = 0x49904
+	Datalist                  Atom = 0x49908
+	Datetime                  Atom = 0x2ab08
+	Dd                        Atom = 0x2bf02
 	Default                   Atom = 0x10a07
-	Defer                     Atom = 0x5c705
-	Del                       Atom = 0x45203
-	Desc                      Atom = 0x56104
+	Defer                     Atom = 0x5f005
+	Del                       Atom = 0x44c03
+	Desc                      Atom = 0x55504
 	Details                   Atom = 0x7207
 	Dfn                       Atom = 0x8703
 	Dialog                    Atom = 0xbb06
@@ -78,106 +78,106 @@ const (
 	Dirname                   Atom = 0x9307
 	Disabled                  Atom = 0x16408
 	Div                       Atom = 0x16b03
-	Dl                        Atom = 0x5e602
-	Download                  Atom = 0x46308
+	Dl                        Atom = 0x5d602
+	Download                  Atom = 0x45d08
 	Draggable                 Atom = 0x17a09
-	Dropzone                  Atom = 0x40508
-	Dt                        Atom = 0x64b02
+	Dropzone                  Atom = 0x3ff08
+	Dt                        Atom = 0x64002
 	Em                        Atom = 0x6e02
 	Embed                     Atom = 0x6e05
-	Enctype                   Atom = 0x28d07
-	Face                      Atom = 0x21e04
-	Fieldset                  Atom = 0x22608
-	Figcaption                Atom = 0x22e0a
-	Figure                    Atom = 0x24806
+	Enctype                   Atom = 0x28007
+	Face                      Atom = 0x21104
+	Fieldset                  Atom = 0x21908
+	Figcaption                Atom = 0x2210a
+	Figure                    Atom = 0x23b06
 	Font                      Atom = 0x3f04
 	Footer                    Atom = 0xf606
-	For                       Atom = 0x25403
-	ForeignObject             Atom = 0x2540d
-	Foreignobject             Atom = 0x2610d
-	Form                      Atom = 0x26e04
-	Formaction                Atom = 0x26e0a
-	Formenctype               Atom = 0x2890b
-	Formmethod                Atom = 0x2a40a
-	Formnovalidate            Atom = 0x2ae0e
-	Formtarget                Atom = 0x2c00a
+	For                       Atom = 0x24703
+	ForeignObject             Atom = 0x2470d
+	Foreignobject             Atom = 0x2540d
+	Form                      Atom = 0x26104
+	Formaction                Atom = 0x2610a
+	Formenctype               Atom = 0x27c0b
+	Formmethod                Atom = 0x2970a
+	Formnovalidate            Atom = 0x2a10e
+	Formtarget                Atom = 0x2b30a
 	Frame                     Atom = 0x8b05
 	Frameset                  Atom = 0x8b08
 	H1                        Atom = 0x15c02
-	H2                        Atom = 0x2de02
-	H3                        Atom = 0x30d02
-	H4                        Atom = 0x34502
-	H5                        Atom = 0x34f02
-	H6                        Atom = 0x64d02
-	Head                      Atom = 0x33104
-	Header                    Atom = 0x33106
-	Headers                   Atom = 0x33107
+	H2                        Atom = 0x56102
+	H3                        Atom = 0x2cd02
+	H4                        Atom = 0x2fc02
+	H5                        Atom = 0x33f02
+	H6                        Atom = 0x34902
+	Head                      Atom = 0x32004
+	Header                    Atom = 0x32006
+	Headers                   Atom = 0x32007
 	Height                    Atom = 0x5206
-	Hgroup                    Atom = 0x2ca06
-	Hidden                    Atom = 0x2d506
-	High                      Atom = 0x2db04
+	Hgroup                    Atom = 0x64206
+	Hidden                    Atom = 0x2bd06
+	High                      Atom = 0x2ca04
 	Hr                        Atom = 0x15702
-	Href                      Atom = 0x2e004
-	Hreflang                  Atom = 0x2e008
+	Href                      Atom = 0x2cf04
+	Hreflang                  Atom = 0x2cf08
 	Html                      Atom = 0x5604
-	HttpEquiv                 Atom = 0x2e80a
+	HttpEquiv                 Atom = 0x2d70a
 	I                         Atom = 0x601
-	Icon                      Atom = 0x58a04
+	Icon                      Atom = 0x57a04
 	Id                        Atom = 0x10902
-	Iframe                    Atom = 0x2fc06
-	Image                     Atom = 0x30205
-	Img                       Atom = 0x30703
-	Input                     Atom = 0x44b05
-	Inputmode                 Atom = 0x44b09
-	Ins                       Atom = 0x20403
-	Integrity                 Atom = 0x23f09
+	Iframe                    Atom = 0x2eb06
+	Image                     Atom = 0x2f105
+	Img                       Atom = 0x2f603
+	Input                     Atom = 0x44505
+	Inputmode                 Atom = 0x44509
+	Ins                       Atom = 0x20303
+	Integrity                 Atom = 0x23209
 	Is                        Atom = 0x16502
-	Isindex                   Atom = 0x30f07
-	Ismap                     Atom = 0x31605
-	Itemid                    Atom = 0x38b06
+	Isindex                   Atom = 0x2fe07
+	Ismap                     Atom = 0x30505
+	Itemid                    Atom = 0x38506
 	Itemprop                  Atom = 0x19d08
-	Itemref                   Atom = 0x3cd07
-	Itemscope                 Atom = 0x67109
-	Itemtype                  Atom = 0x31f08
+	Itemref                   Atom = 0x3c707
+	Itemscope                 Atom = 0x66f09
+	Itemtype                  Atom = 0x30e08
 	Kbd                       Atom = 0xb903
 	Keygen                    Atom = 0x3206
 	Keytype                   Atom = 0xd607
 	Kind                      Atom = 0x17704
 	Label                     Atom = 0x5905
-	Lang                      Atom = 0x2e404
+	Lang                      Atom = 0x2d304
 	Legend                    Atom = 0x18106
 	Li                        Atom = 0xb202
 	Link                      Atom = 0x17404
-	List                      Atom = 0x4a904
-	Listing                   Atom = 0x4a907
+	List                      Atom = 0x49d04
+	Listing                   Atom = 0x49d07
 	Loop                      Atom = 0x5d04
 	Low                       Atom = 0xc303
 	Main                      Atom = 0x1004
 	Malignmark                Atom = 0xb00a
-	Manifest                  Atom = 0x6d708
-	Map                       Atom = 0x31803
+	Manifest                  Atom = 0x6d508
+	Map                       Atom = 0x30703
 	Mark                      Atom = 0xb604
-	Marquee                   Atom = 0x32707
-	Math                      Atom = 0x32e04
-	Max                       Atom = 0x33d03
-	Maxlength                 Atom = 0x33d09
+	Marquee                   Atom = 0x31607
+	Math                      Atom = 0x31d04
+	Max                       Atom = 0x33703
+	Maxlength                 Atom = 0x33709
 	Media                     Atom = 0xe605
 	Mediagroup                Atom = 0xe60a
-	Menu                      Atom = 0x38704
-	Menuitem                  Atom = 0x38708
-	Meta                      Atom = 0x4b804
+	Menu                      Atom = 0x38104
+	Menuitem                  Atom = 0x38108
+	Meta                      Atom = 0x4ac04
 	Meter                     Atom = 0x9805
-	Method                    Atom = 0x2a806
-	Mglyph                    Atom = 0x30806
-	Mi                        Atom = 0x34702
-	Min                       Atom = 0x34703
-	Minlength                 Atom = 0x34709
-	Mn                        Atom = 0x2b102
+	Method                    Atom = 0x29b06
+	Mglyph                    Atom = 0x2f706
+	Mi                        Atom = 0x34102
+	Min                       Atom = 0x34103
+	Minlength                 Atom = 0x34109
+	Mn                        Atom = 0x2a402
 	Mo                        Atom = 0xa402
-	Ms                        Atom = 0x67402
-	Mtext                     Atom = 0x35105
-	Multiple                  Atom = 0x35f08
-	Muted                     Atom = 0x36705
+	Ms                        Atom = 0x67202
+	Mtext                     Atom = 0x34b05
+	Multiple                  Atom = 0x35908
+	Muted                     Atom = 0x36105
 	Name                      Atom = 0x9604
 	Nav                       Atom = 0x1303
 	Nobr                      Atom = 0x3704
@@ -185,101 +185,101 @@ const (
 	Noframes                  Atom = 0x8908
 	Nomodule                  Atom = 0xa208
 	Nonce                     Atom = 0x1a605
-	Noscript                  Atom = 0x21608
-	Novalidate                Atom = 0x2b20a
-	Object                    Atom = 0x26806
+	Noscript                  Atom = 0x2c208
+	Novalidate                Atom = 0x2a50a
+	Object                    Atom = 0x25b06
 	Ol                        Atom = 0x13702
 	Onabort                   Atom = 0x19507
-	Onafterprint              Atom = 0x2360c
-	Onautocomplete            Atom = 0x2760e
-	Onautocompleteerror       Atom = 0x27613
-	Onauxclick                Atom = 0x61f0a
-	Onbeforeprint             Atom = 0x69e0d
-	Onbeforeunload            Atom = 0x6e70e
-	Onblur                    Atom = 0x56d06
+	Onafterprint              Atom = 0x2290c
+	Onautocomplete            Atom = 0x2690e
+	Onautocompleteerror       Atom = 0x26913
+	Onauxclick                Atom = 0x6140a
+	Onbeforeprint             Atom = 0x69c0d
+	Onbeforeunload            Atom = 0x6e50e
+	Onblur                    Atom = 0x1ea06
 	Oncancel                  Atom = 0x11908
 	Oncanplay                 Atom = 0x14d09
 	Oncanplaythrough          Atom = 0x14d10
-	Onchange                  Atom = 0x41b08
-	Onclick                   Atom = 0x2f507
-	Onclose                   Atom = 0x36c07
-	Oncontextmenu             Atom = 0x37e0d
-	Oncopy                    Atom = 0x39106
-	Oncuechange               Atom = 0x3970b
-	Oncut                     Atom = 0x3a205
-	Ondblclick                Atom = 0x3a70a
-	Ondrag                    Atom = 0x3b106
-	Ondragend                 Atom = 0x3b109
-	Ondragenter               Atom = 0x3ba0b
-	Ondragexit                Atom = 0x3c50a
-	Ondragleave               Atom = 0x3df0b
-	Ondragover                Atom = 0x3ea0a
-	Ondragstart               Atom = 0x3f40b
-	Ondrop                    Atom = 0x40306
-	Ondurationchange          Atom = 0x41310
-	Onemptied                 Atom = 0x40a09
-	Onended                   Atom = 0x42307
-	Onerror                   Atom = 0x42a07
-	Onfocus                   Atom = 0x43107
-	Onhashchange              Atom = 0x43d0c
-	Oninput                   Atom = 0x44907
-	Oninvalid                 Atom = 0x45509
-	Onkeydown                 Atom = 0x45e09
-	Onkeypress                Atom = 0x46b0a
-	Onkeyup                   Atom = 0x48007
-	Onlanguagechange          Atom = 0x48d10
-	Onload                    Atom = 0x49d06
-	Onloadeddata              Atom = 0x49d0c
-	Onloadedmetadata          Atom = 0x4b010
-	Onloadend                 Atom = 0x4c609
-	Onloadstart               Atom = 0x4cf0b
-	Onmessage                 Atom = 0x4da09
-	Onmessageerror            Atom = 0x4da0e
-	Onmousedown               Atom = 0x4e80b
-	Onmouseenter              Atom = 0x4f30c
-	Onmouseleave              Atom = 0x4ff0c
-	Onmousemove               Atom = 0x50b0b
-	Onmouseout                Atom = 0x5160a
-	Onmouseover               Atom = 0x5230b
-	Onmouseup                 Atom = 0x52e09
-	Onmousewheel              Atom = 0x53c0c
-	Onoffline                 Atom = 0x54809
-	Ononline                  Atom = 0x55108
-	Onpagehide                Atom = 0x5590a
-	Onpageshow                Atom = 0x5730a
-	Onpaste                   Atom = 0x57f07
-	Onpause                   Atom = 0x59a07
-	Onplay                    Atom = 0x5a406
-	Onplaying                 Atom = 0x5a409
-	Onpopstate                Atom = 0x5ad0a
-	Onprogress                Atom = 0x5b70a
-	Onratechange              Atom = 0x5cc0c
-	Onrejectionhandled        Atom = 0x5d812
-	Onreset                   Atom = 0x5ea07
-	Onresize                  Atom = 0x5f108
-	Onscroll                  Atom = 0x60008
-	Onsecuritypolicyviolation Atom = 0x60819
-	Onseeked                  Atom = 0x62908
-	Onseeking                 Atom = 0x63109
-	Onselect                  Atom = 0x63a08
-	Onshow                    Atom = 0x64406
-	Onsort                    Atom = 0x64f06
-	Onstalled                 Atom = 0x65909
-	Onstorage                 Atom = 0x66209
-	Onsubmit                  Atom = 0x66b08
-	Onsuspend                 Atom = 0x67b09
+	Onchange                  Atom = 0x41508
+	Onclick                   Atom = 0x2e407
+	Onclose                   Atom = 0x36607
+	Oncontextmenu             Atom = 0x3780d
+	Oncopy                    Atom = 0x38b06
+	Oncuechange               Atom = 0x3910b
+	Oncut                     Atom = 0x39c05
+	Ondblclick                Atom = 0x3a10a
+	Ondrag                    Atom = 0x3ab06
+	Ondragend                 Atom = 0x3ab09
+	Ondragenter               Atom = 0x3b40b
+	Ondragexit                Atom = 0x3bf0a
+	Ondragleave               Atom = 0x3d90b
+	Ondragover                Atom = 0x3e40a
+	Ondragstart               Atom = 0x3ee0b
+	Ondrop                    Atom = 0x3fd06
+	Ondurationchange          Atom = 0x40d10
+	Onemptied                 Atom = 0x40409
+	Onended                   Atom = 0x41d07
+	Onerror                   Atom = 0x42407
+	Onfocus                   Atom = 0x42b07
+	Onhashchange              Atom = 0x4370c
+	Oninput                   Atom = 0x44307
+	Oninvalid                 Atom = 0x44f09
+	Onkeydown                 Atom = 0x45809
+	Onkeypress                Atom = 0x4650a
+	Onkeyup                   Atom = 0x47407
+	Onlanguagechange          Atom = 0x48110
+	Onload                    Atom = 0x49106
+	Onloadeddata              Atom = 0x4910c
+	Onloadedmetadata          Atom = 0x4a410
+	Onloadend                 Atom = 0x4ba09
+	Onloadstart               Atom = 0x4c30b
+	Onmessage                 Atom = 0x4ce09
+	Onmessageerror            Atom = 0x4ce0e
+	Onmousedown               Atom = 0x4dc0b
+	Onmouseenter              Atom = 0x4e70c
+	Onmouseleave              Atom = 0x4f30c
+	Onmousemove               Atom = 0x4ff0b
+	Onmouseout                Atom = 0x50a0a
+	Onmouseover               Atom = 0x5170b
+	Onmouseup                 Atom = 0x52209
+	Onmousewheel              Atom = 0x5300c
+	Onoffline                 Atom = 0x53c09
+	Ononline                  Atom = 0x54508
+	Onpagehide                Atom = 0x54d0a
+	Onpageshow                Atom = 0x5630a
+	Onpaste                   Atom = 0x56f07
+	Onpause                   Atom = 0x58a07
+	Onplay                    Atom = 0x59406
+	Onplaying                 Atom = 0x59409
+	Onpopstate                Atom = 0x59d0a
+	Onprogress                Atom = 0x5a70a
+	Onratechange              Atom = 0x5bc0c
+	Onrejectionhandled        Atom = 0x5c812
+	Onreset                   Atom = 0x5da07
+	Onresize                  Atom = 0x5e108
+	Onscroll                  Atom = 0x5f508
+	Onsecuritypolicyviolation Atom = 0x5fd19
+	Onseeked                  Atom = 0x61e08
+	Onseeking                 Atom = 0x62609
+	Onselect                  Atom = 0x62f08
+	Onshow                    Atom = 0x63906
+	Onsort                    Atom = 0x64d06
+	Onstalled                 Atom = 0x65709
+	Onstorage                 Atom = 0x66009
+	Onsubmit                  Atom = 0x66908
+	Onsuspend                 Atom = 0x67909
 	Ontimeupdate              Atom = 0x400c
-	Ontoggle                  Atom = 0x68408
-	Onunhandledrejection      Atom = 0x68c14
-	Onunload                  Atom = 0x6ab08
-	Onvolumechange            Atom = 0x6b30e
-	Onwaiting                 Atom = 0x6c109
-	Onwheel                   Atom = 0x6ca07
+	Ontoggle                  Atom = 0x68208
+	Onunhandledrejection      Atom = 0x68a14
+	Onunload                  Atom = 0x6a908
+	Onvolumechange            Atom = 0x6b10e
+	Onwaiting                 Atom = 0x6bf09
+	Onwheel                   Atom = 0x6c807
 	Open                      Atom = 0x1a304
 	Optgroup                  Atom = 0x5f08
-	Optimum                   Atom = 0x6d107
-	Option                    Atom = 0x6e306
-	Output                    Atom = 0x51d06
+	Optimum                   Atom = 0x6cf07
+	Option                    Atom = 0x6e106
+	Output                    Atom = 0x51106
 	P                         Atom = 0xc01
 	Param                     Atom = 0xc05
 	Pattern                   Atom = 0x6607
@@ -288,466 +288,468 @@ const (
 	Placeholder               Atom = 0x1310b
 	Plaintext                 Atom = 0x1b209
 	Playsinline               Atom = 0x1400b
-	Poster                    Atom = 0x2cf06
-	Pre                       Atom = 0x47003
-	Preload                   Atom = 0x48607
-	Progress                  Atom = 0x5b908
-	Prompt                    Atom = 0x53606
-	Public                    Atom = 0x58606
+	Poster                    Atom = 0x64706
+	Pre                       Atom = 0x46a03
+	Preload                   Atom = 0x47a07
+	Progress                  Atom = 0x5a908
+	Prompt                    Atom = 0x52a06
+	Public                    Atom = 0x57606
 	Q                         Atom = 0xcf01
 	Radiogroup                Atom = 0x30a
 	Rb                        Atom = 0x3a02
-	Readonly                  Atom = 0x35708
-	Referrerpolicy            Atom = 0x3d10e
-	Rel                       Atom = 0x48703
-	Required                  Atom = 0x24c08
+	Readonly                  Atom = 0x35108
+	Referrerpolicy            Atom = 0x3cb0e
+	Rel                       Atom = 0x47b03
+	Required                  Atom = 0x23f08
 	Reversed                  Atom = 0x8008
 	Rows                      Atom = 0x9c04
 	Rowspan                   Atom = 0x9c07
-	Rp                        Atom = 0x23c02
+	Rp                        Atom = 0x22f02
 	Rt                        Atom = 0x19a02
 	Rtc                       Atom = 0x19a03
 	Ruby                      Atom = 0xfb04
 	S                         Atom = 0x2501
 	Samp                      Atom = 0x7804
 	Sandbox                   Atom = 0x12907
-	Scope                     Atom = 0x67505
-	Scoped                    Atom = 0x67506
-	Script                    Atom = 0x21806
-	Seamless                  Atom = 0x37108
-	Section                   Atom = 0x56807
-	Select                    Atom = 0x63c06
-	Selected                  Atom = 0x63c08
-	Shape                     Atom = 0x1e505
-	Size                      Atom = 0x5f504
-	Sizes                     Atom = 0x5f505
-	Slot                      Atom = 0x1ef04
-	Small                     Atom = 0x20605
-	Sortable                  Atom = 0x65108
-	Sorted                    Atom = 0x33706
-	Source                    Atom = 0x37806
-	Spacer                    Atom = 0x43706
+	Scope                     Atom = 0x67305
+	Scoped                    Atom = 0x67306
+	Script                    Atom = 0x2c406
+	Seamless                  Atom = 0x36b08
+	Search                    Atom = 0x55c06
+	Section                   Atom = 0x1e507
+	Select                    Atom = 0x63106
+	Selected                  Atom = 0x63108
+	Shape                     Atom = 0x1f505
+	Size                      Atom = 0x5e504
+	Sizes                     Atom = 0x5e505
+	Slot                      Atom = 0x20504
+	Small                     Atom = 0x32605
+	Sortable                  Atom = 0x64f08
+	Sorted                    Atom = 0x37206
+	Source                    Atom = 0x43106
+	Spacer                    Atom = 0x46e06
 	Span                      Atom = 0x9f04
-	Spellcheck                Atom = 0x4740a
-	Src                       Atom = 0x5c003
-	Srcdoc                    Atom = 0x5c006
-	Srclang                   Atom = 0x5f907
-	Srcset                    Atom = 0x6f906
-	Start                     Atom = 0x3fa05
-	Step                      Atom = 0x58304
+	Spellcheck                Atom = 0x5b00a
+	Src                       Atom = 0x5e903
+	Srcdoc                    Atom = 0x5e906
+	Srclang                   Atom = 0x6f707
+	Srcset                    Atom = 0x6fe06
+	Start                     Atom = 0x3f405
+	Step                      Atom = 0x57304
 	Strike                    Atom = 0xd206
-	Strong                    Atom = 0x6dd06
-	Style                     Atom = 0x6ff05
-	Sub                       Atom = 0x66d03
-	Summary                   Atom = 0x70407
-	Sup                       Atom = 0x70b03
-	Svg                       Atom = 0x70e03
-	System                    Atom = 0x71106
-	Tabindex                  Atom = 0x4be08
-	Table                     Atom = 0x59505
-	Target                    Atom = 0x2c406
+	Strong                    Atom = 0x6db06
+	Style                     Atom = 0x70405
+	Sub                       Atom = 0x66b03
+	Summary                   Atom = 0x70907
+	Sup                       Atom = 0x71003
+	Svg                       Atom = 0x71303
+	System                    Atom = 0x71606
+	Tabindex                  Atom = 0x4b208
+	Table                     Atom = 0x58505
+	Target                    Atom = 0x2b706
 	Tbody                     Atom = 0x2705
 	Td                        Atom = 0x9202
-	Template                  Atom = 0x71408
-	Textarea                  Atom = 0x35208
+	Template                  Atom = 0x71908
+	Textarea                  Atom = 0x34c08
 	Tfoot                     Atom = 0xf505
 	Th                        Atom = 0x15602
-	Thead                     Atom = 0x33005
+	Thead                     Atom = 0x31f05
 	Time                      Atom = 0x4204
 	Title                     Atom = 0x11005
 	Tr                        Atom = 0xcc02
 	Track                     Atom = 0x1ba05
-	Translate                 Atom = 0x1f209
+	Translate                 Atom = 0x20809
 	Tt                        Atom = 0x6802
 	Type                      Atom = 0xd904
-	Typemustmatch             Atom = 0x2900d
+	Typemustmatch             Atom = 0x2830d
 	U                         Atom = 0xb01
 	Ul                        Atom = 0xa702
 	Updateviacache            Atom = 0x460e
-	Usemap                    Atom = 0x59e06
+	Usemap                    Atom = 0x58e06
 	Value                     Atom = 0x1505
 	Var                       Atom = 0x16d03
-	Video                     Atom = 0x2f105
-	Wbr                       Atom = 0x57c03
-	Width                     Atom = 0x64905
-	Workertype                Atom = 0x71c0a
-	Wrap                      Atom = 0x72604
+	Video                     Atom = 0x2e005
+	Wbr                       Atom = 0x56c03
+	Width                     Atom = 0x63e05
+	Workertype                Atom = 0x7210a
+	Wrap                      Atom = 0x72b04
 	Xmp                       Atom = 0x12f03
 )
 
-const hash0 = 0x81cdf10e
+const hash0 = 0x84f70e16
 
 const maxAtomLen = 25
 
 var table = [1 << 9]Atom{
-	0x1:   0xe60a,  // mediagroup
-	0x2:   0x2e404, // lang
-	0x4:   0x2c09,  // accesskey
-	0x5:   0x8b08,  // frameset
-	0x7:   0x63a08, // onselect
-	0x8:   0x71106, // system
-	0xa:   0x64905, // width
-	0xc:   0x2890b, // formenctype
-	0xd:   0x13702, // ol
-	0xe:   0x3970b, // oncuechange
-	0x10:  0x14b03, // bdo
-	0x11:  0x11505, // audio
-	0x12:  0x17a09, // draggable
-	0x14:  0x2f105, // video
-	0x15:  0x2b102, // mn
-	0x16:  0x38704, // menu
-	0x17:  0x2cf06, // poster
-	0x19:  0xf606,  // footer
-	0x1a:  0x2a806, // method
-	0x1b:  0x2b808, // datetime
-	0x1c:  0x19507, // onabort
-	0x1d:  0x460e,  // updateviacache
-	0x1e:  0xff05,  // async
-	0x1f:  0x49d06, // onload
-	0x21:  0x11908, // oncancel
-	0x22:  0x62908, // onseeked
-	0x23:  0x30205, // image
-	0x24:  0x5d812, // onrejectionhandled
-	0x26:  0x17404, // link
-	0x27:  0x51d06, // output
-	0x28:  0x33104, // head
-	0x29:  0x4ff0c, // onmouseleave
-	0x2a:  0x57f07, // onpaste
-	0x2b:  0x5a409, // onplaying
-	0x2c:  0x1c407, // colspan
-	0x2f:  0x1bf05, // color
-	0x30:  0x5f504, // size
-	0x31:  0x2e80a, // http-equiv
-	0x33:  0x601,   // i
-	0x34:  0x5590a, // onpagehide
-	0x35:  0x68c14, // onunhandledrejection
-	0x37:  0x42a07, // onerror
-	0x3a:  0x3b08,  // basefont
-	0x3f:  0x1303,  // nav
-	0x40:  0x17704, // kind
-	0x41:  0x35708, // readonly
-	0x42:  0x30806, // mglyph
-	0x44:  0xb202,  // li
-	0x46:  0x2d506, // hidden
-	0x47:  0x70e03, // svg
-	0x48:  0x58304, // step
-	0x49:  0x23f09, // integrity
-	0x4a:  0x58606, // public
-	0x4c:  0x1ab03, // col
-	0x4d:  0x1870a, // blockquote
-	0x4e:  0x34f02, // h5
-	0x50:  0x5b908, // progress
-	0x51:  0x5f505, // sizes
-	0x52:  0x34502, // h4
-	0x56:  0x33005, // thead
-	0x57:  0xd607,  // keytype
-	0x58:  0x5b70a, // onprogress
-	0x59:  0x44b09, // inputmode
-	0x5a:  0x3b109, // ondragend
-	0x5d:  0x3a205, // oncut
-	0x5e:  0x43706, // spacer
-	0x5f:  0x1ab08, // colgroup
-	0x62:  0x16502, // is
-	0x65:  0x3c02,  // as
-	0x66:  0x54809, // onoffline
-	0x67:  0x33706, // sorted
-	0x69:  0x48d10, // onlanguagechange
-	0x6c:  0x43d0c, // onhashchange
-	0x6d:  0x9604,  // name
-	0x6e:  0xf505,  // tfoot
-	0x6f:  0x56104, // desc
-	0x70:  0x33d03, // max
-	0x72:  0x1ea06, // coords
-	0x73:  0x30d02, // h3
-	0x74:  0x6e70e, // onbeforeunload
-	0x75:  0x9c04,  // rows
-	0x76:  0x63c06, // select
-	0x77:  0x9805,  // meter
-	0x78:  0x38b06, // itemid
-	0x79:  0x53c0c, // onmousewheel
-	0x7a:  0x5c006, // srcdoc
-	0x7d:  0x1ba05, // track
-	0x7f:  0x31f08, // itemtype
-	0x82:  0xa402,  // mo
-	0x83:  0x41b08, // onchange
-	0x84:  0x33107, // headers
-	0x85:  0x5cc0c, // onratechange
-	0x86:  0x60819, // onsecuritypolicyviolation
-	0x88:  0x4a508, // datalist
-	0x89:  0x4e80b, // onmousedown
-	0x8a:  0x1ef04, // slot
-	0x8b:  0x4b010, // onloadedmetadata
-	0x8c:  0x1a06,  // accept
-	0x8d:  0x26806, // object
-	0x91:  0x6b30e, // onvolumechange
-	0x92:  0x2107,  // charset
-	0x93:  0x27613, // onautocompleteerror
-	0x94:  0xc113,  // allowpaymentrequest
-	0x95:  0x2804,  // body
-	0x96:  0x10a07, // default
-	0x97:  0x63c08, // selected
-	0x98:  0x21e04, // face
-	0x99:  0x1e505, // shape
-	0x9b:  0x68408, // ontoggle
-	0x9e:  0x64b02, // dt
-	0x9f:  0xb604,  // mark
-	0xa1:  0xb01,   // u
-	0xa4:  0x6ab08, // onunload
-	0xa5:  0x5d04,  // loop
-	0xa6:  0x16408, // disabled
-	0xaa:  0x42307, // onended
-	0xab:  0xb00a,  // malignmark
-	0xad:  0x67b09, // onsuspend
-	0xae:  0x35105, // mtext
-	0xaf:  0x64f06, // onsort
-	0xb0:  0x19d08, // itemprop
-	0xb3:  0x67109, // itemscope
-	0xb4:  0x17305, // blink
-	0xb6:  0x3b106, // ondrag
-	0xb7:  0xa702,  // ul
-	0xb8:  0x26e04, // form
-	0xb9:  0x12907, // sandbox
-	0xba:  0x8b05,  // frame
-	0xbb:  0x1505,  // value
-	0xbc:  0x66209, // onstorage
-	0xbf:  0xaa07,  // acronym
-	0xc0:  0x19a02, // rt
-	0xc2:  0x202,   // br
-	0xc3:  0x22608, // fieldset
-	0xc4:  0x2900d, // typemustmatch
-	0xc5:  0xa208,  // nomodule
-	0xc6:  0x6c07,  // noembed
-	0xc7:  0x69e0d, // onbeforeprint
-	0xc8:  0x19106, // button
-	0xc9:  0x2f507, // onclick
-	0xca:  0x70407, // summary
-	0xcd:  0xfb04,  // ruby
-	0xce:  0x56405, // class
-	0xcf:  0x3f40b, // ondragstart
-	0xd0:  0x23107, // caption
-	0xd4:  0xdd0e,  // allowusermedia
-	0xd5:  0x4cf0b, // onloadstart
-	0xd9:  0x16b03, // div
-	0xda:  0x4a904, // list
-	0xdb:  0x32e04, // math
-	0xdc:  0x44b05, // input
-	0xdf:  0x3ea0a, // ondragover
-	0xe0:  0x2de02, // h2
-	0xe2:  0x1b209, // plaintext
-	0xe4:  0x4f30c, // onmouseenter
-	0xe7:  0x47907, // checked
-	0xe8:  0x47003, // pre
-	0xea:  0x35f08, // multiple
-	0xeb:  0xba03,  // bdi
-	0xec:  0x33d09, // maxlength
-	0xed:  0xcf01,  // q
-	0xee:  0x61f0a, // onauxclick
-	0xf0:  0x57c03, // wbr
-	0xf2:  0x3b04,  // base
-	0xf3:  0x6e306, // option
-	0xf5:  0x41310, // ondurationchange
-	0xf7:  0x8908,  // noframes
-	0xf9:  0x40508, // dropzone
-	0xfb:  0x67505, // scope
-	0xfc:  0x8008,  // reversed
-	0xfd:  0x3ba0b, // ondragenter
-	0xfe:  0x3fa05, // start
-	0xff:  0x12f03, // xmp
-	0x100: 0x5f907, // srclang
-	0x101: 0x30703, // img
-	0x104: 0x101,   // b
-	0x105: 0x25403, // for
-	0x106: 0x10705, // aside
-	0x107: 0x44907, // oninput
-	0x108: 0x35604, // area
-	0x109: 0x2a40a, // formmethod
-	0x10a: 0x72604, // wrap
-	0x10c: 0x23c02, // rp
-	0x10d: 0x46b0a, // onkeypress
-	0x10e: 0x6802,  // tt
-	0x110: 0x34702, // mi
-	0x111: 0x36705, // muted
-	0x112: 0xf303,  // alt
-	0x113: 0x5c504, // code
-	0x114: 0x6e02,  // em
-	0x115: 0x3c50a, // ondragexit
-	0x117: 0x9f04,  // span
-	0x119: 0x6d708, // manifest
-	0x11a: 0x38708, // menuitem
-	0x11b: 0x58b07, // content
-	0x11d: 0x6c109, // onwaiting
-	0x11f: 0x4c609, // onloadend
-	0x121: 0x37e0d, // oncontextmenu
-	0x123: 0x56d06, // onblur
-	0x124: 0x3fc07, // article
-	0x125: 0x9303,  // dir
-	0x126: 0xef04,  // ping
-	0x127: 0x24c08, // required
-	0x128: 0x45509, // oninvalid
-	0x129: 0xb105,  // align
-	0x12b: 0x58a04, // icon
-	0x12c: 0x64d02, // h6
-	0x12d: 0x1c404, // cols
-	0x12e: 0x22e0a, // figcaption
-	0x12f: 0x45e09, // onkeydown
-	0x130: 0x66b08, // onsubmit
-	0x131: 0x14d09, // oncanplay
-	0x132: 0x70b03, // sup
-	0x133: 0xc01,   // p
-	0x135: 0x40a09, // onemptied
-	0x136: 0x39106, // oncopy
-	0x137: 0x19c04, // cite
-	0x138: 0x3a70a, // ondblclick
-	0x13a: 0x50b0b, // onmousemove
-	0x13c: 0x66d03, // sub
-	0x13d: 0x48703, // rel
-	0x13e: 0x5f08,  // optgroup
-	0x142: 0x9c07,  // rowspan
-	0x143: 0x37806, // source
-	0x144: 0x21608, // noscript
-	0x145: 0x1a304, // open
-	0x146: 0x20403, // ins
-	0x147: 0x2540d, // foreignObject
-	0x148: 0x5ad0a, // onpopstate
-	0x14a: 0x28d07, // enctype
-	0x14b: 0x2760e, // onautocomplete
-	0x14c: 0x35208, // textarea
-	0x14e: 0x2780c, // autocomplete
-	0x14f: 0x15702, // hr
-	0x150: 0x1de08, // controls
-	0x151: 0x10902, // id
-	0x153: 0x2360c, // onafterprint
-	0x155: 0x2610d, // foreignobject
-	0x156: 0x32707, // marquee
-	0x157: 0x59a07, // onpause
-	0x158: 0x5e602, // dl
-	0x159: 0x5206,  // height
-	0x15a: 0x34703, // min
-	0x15b: 0x9307,  // dirname
-	0x15c: 0x1f209, // translate
-	0x15d: 0x5604,  // html
-	0x15e: 0x34709, // minlength
-	0x15f: 0x48607, // preload
-	0x160: 0x71408, // template
-	0x161: 0x3df0b, // ondragleave
-	0x162: 0x3a02,  // rb
-	0x164: 0x5c003, // src
-	0x165: 0x6dd06, // strong
-	0x167: 0x7804,  // samp
-	0x168: 0x6f307, // address
-	0x169: 0x55108, // ononline
-	0x16b: 0x1310b, // placeholder
-	0x16c: 0x2c406, // target
-	0x16d: 0x20605, // small
-	0x16e: 0x6ca07, // onwheel
-	0x16f: 0x1c90a, // annotation
-	0x170: 0x4740a, // spellcheck
-	0x171: 0x7207,  // details
-	0x172: 0x10306, // canvas
-	0x173: 0x12109, // autofocus
-	0x174: 0xc05,   // param
-	0x176: 0x46308, // download
-	0x177: 0x45203, // del
-	0x178: 0x36c07, // onclose
-	0x179: 0xb903,  // kbd
-	0x17a: 0x31906, // applet
-	0x17b: 0x2e004, // href
-	0x17c: 0x5f108, // onresize
-	0x17e: 0x49d0c, // onloadeddata
-	0x180: 0xcc02,  // tr
-	0x181: 0x2c00a, // formtarget
-	0x182: 0x11005, // title
-	0x183: 0x6ff05, // style
-	0x184: 0xd206,  // strike
-	0x185: 0x59e06, // usemap
-	0x186: 0x2fc06, // iframe
-	0x187: 0x1004,  // main
-	0x189: 0x7b07,  // picture
-	0x18c: 0x31605, // ismap
-	0x18e: 0x4a504, // data
-	0x18f: 0x5905,  // label
-	0x191: 0x3d10e, // referrerpolicy
-	0x192: 0x15602, // th
-	0x194: 0x53606, // prompt
-	0x195: 0x56807, // section
-	0x197: 0x6d107, // optimum
-	0x198: 0x2db04, // high
-	0x199: 0x15c02, // h1
-	0x19a: 0x65909, // onstalled
-	0x19b: 0x16d03, // var
-	0x19c: 0x4204,  // time
-	0x19e: 0x67402, // ms
-	0x19f: 0x33106, // header
-	0x1a0: 0x4da09, // onmessage
-	0x1a1: 0x1a605, // nonce
-	0x1a2: 0x26e0a, // formaction
-	0x1a3: 0x22006, // center
-	0x1a4: 0x3704,  // nobr
-	0x1a5: 0x59505, // table
-	0x1a6: 0x4a907, // listing
-	0x1a7: 0x18106, // legend
-	0x1a9: 0x29b09, // challenge
-	0x1aa: 0x24806, // figure
-	0x1ab: 0xe605,  // media
-	0x1ae: 0xd904,  // type
-	0x1af: 0x3f04,  // font
-	0x1b0: 0x4da0e, // onmessageerror
-	0x1b1: 0x37108, // seamless
-	0x1b2: 0x8703,  // dfn
-	0x1b3: 0x5c705, // defer
-	0x1b4: 0xc303,  // low
-	0x1b5: 0x19a03, // rtc
-	0x1b6: 0x5230b, // onmouseover
-	0x1b7: 0x2b20a, // novalidate
-	0x1b8: 0x71c0a, // workertype
-	0x1ba: 0x3cd07, // itemref
-	0x1bd: 0x1,     // a
-	0x1be: 0x31803, // map
-	0x1bf: 0x400c,  // ontimeupdate
-	0x1c0: 0x15e07, // bgsound
-	0x1c1: 0x3206,  // keygen
-	0x1c2: 0x2705,  // tbody
-	0x1c5: 0x64406, // onshow
-	0x1c7: 0x2501,  // s
-	0x1c8: 0x6607,  // pattern
-	0x1cc: 0x14d10, // oncanplaythrough
-	0x1ce: 0x2d702, // dd
-	0x1cf: 0x6f906, // srcset
-	0x1d0: 0x17003, // big
-	0x1d2: 0x65108, // sortable
-	0x1d3: 0x48007, // onkeyup
-	0x1d5: 0x5a406, // onplay
-	0x1d7: 0x4b804, // meta
-	0x1d8: 0x40306, // ondrop
-	0x1da: 0x60008, // onscroll
-	0x1db: 0x1fb0b, // crossorigin
-	0x1dc: 0x5730a, // onpageshow
-	0x1dd: 0x4,     // abbr
-	0x1de: 0x9202,  // td
-	0x1df: 0x58b0f, // contenteditable
-	0x1e0: 0x27206, // action
-	0x1e1: 0x1400b, // playsinline
-	0x1e2: 0x43107, // onfocus
-	0x1e3: 0x2e008, // hreflang
-	0x1e5: 0x5160a, // onmouseout
-	0x1e6: 0x5ea07, // onreset
-	0x1e7: 0x13c08, // autoplay
-	0x1e8: 0x63109, // onseeking
-	0x1ea: 0x67506, // scoped
-	0x1ec: 0x30a,   // radiogroup
-	0x1ee: 0x3800b, // contextmenu
-	0x1ef: 0x52e09, // onmouseup
-	0x1f1: 0x2ca06, // hgroup
-	0x1f2: 0x2080f, // allowfullscreen
-	0x1f3: 0x4be08, // tabindex
-	0x1f6: 0x30f07, // isindex
-	0x1f7: 0x1a0e,  // accept-charset
-	0x1f8: 0x2ae0e, // formnovalidate
-	0x1fb: 0x1c90e, // annotation-xml
-	0x1fc: 0x6e05,  // embed
-	0x1fd: 0x21806, // script
-	0x1fe: 0xbb06,  // dialog
-	0x1ff: 0x1d707, // command
+	0x1:   0x3ff08, // dropzone
+	0x2:   0x3b08,  // basefont
+	0x3:   0x23209, // integrity
+	0x4:   0x43106, // source
+	0x5:   0x2c09,  // accesskey
+	0x6:   0x1a06,  // accept
+	0x7:   0x6c807, // onwheel
+	0xb:   0x47407, // onkeyup
+	0xc:   0x32007, // headers
+	0xd:   0x67306, // scoped
+	0xe:   0x67909, // onsuspend
+	0xf:   0x8908,  // noframes
+	0x10:  0x1fa0b, // crossorigin
+	0x11:  0x2e407, // onclick
+	0x12:  0x3f405, // start
+	0x13:  0x37a0b, // contextmenu
+	0x14:  0x5e903, // src
+	0x15:  0x1c404, // cols
+	0x16:  0xbb06,  // dialog
+	0x17:  0x47a07, // preload
+	0x18:  0x3c707, // itemref
+	0x1b:  0x2f105, // image
+	0x1d:  0x4ba09, // onloadend
+	0x1e:  0x45d08, // download
+	0x1f:  0x46a03, // pre
+	0x23:  0x2970a, // formmethod
+	0x24:  0x71303, // svg
+	0x25:  0xcf01,  // q
+	0x26:  0x64002, // dt
+	0x27:  0x1de08, // controls
+	0x2a:  0x2804,  // body
+	0x2b:  0xd206,  // strike
+	0x2c:  0x3910b, // oncuechange
+	0x2d:  0x4c30b, // onloadstart
+	0x2e:  0x2fe07, // isindex
+	0x2f:  0xb202,  // li
+	0x30:  0x1400b, // playsinline
+	0x31:  0x34102, // mi
+	0x32:  0x30806, // applet
+	0x33:  0x4ce09, // onmessage
+	0x35:  0x13702, // ol
+	0x36:  0x1a304, // open
+	0x39:  0x14d09, // oncanplay
+	0x3a:  0x6bf09, // onwaiting
+	0x3b:  0x11908, // oncancel
+	0x3c:  0x6a908, // onunload
+	0x3e:  0x53c09, // onoffline
+	0x3f:  0x1a0e,  // accept-charset
+	0x40:  0x32004, // head
+	0x42:  0x3ab09, // ondragend
+	0x43:  0x1310b, // placeholder
+	0x44:  0x2b30a, // formtarget
+	0x45:  0x2540d, // foreignobject
+	0x47:  0x400c,  // ontimeupdate
+	0x48:  0xdd0e,  // allowusermedia
+	0x4a:  0x69c0d, // onbeforeprint
+	0x4b:  0x5604,  // html
+	0x4c:  0x9f04,  // span
+	0x4d:  0x64206, // hgroup
+	0x4e:  0x16408, // disabled
+	0x4f:  0x4204,  // time
+	0x51:  0x42b07, // onfocus
+	0x53:  0xb00a,  // malignmark
+	0x55:  0x4650a, // onkeypress
+	0x56:  0x55805, // class
+	0x57:  0x1ab08, // colgroup
+	0x58:  0x33709, // maxlength
+	0x59:  0x5a908, // progress
+	0x5b:  0x70405, // style
+	0x5c:  0x2a10e, // formnovalidate
+	0x5e:  0x38b06, // oncopy
+	0x60:  0x26104, // form
+	0x61:  0xf606,  // footer
+	0x64:  0x30a,   // radiogroup
+	0x66:  0xfb04,  // ruby
+	0x67:  0x4ff0b, // onmousemove
+	0x68:  0x19d08, // itemprop
+	0x69:  0x2d70a, // http-equiv
+	0x6a:  0x15602, // th
+	0x6c:  0x6e02,  // em
+	0x6d:  0x38108, // menuitem
+	0x6e:  0x63106, // select
+	0x6f:  0x48110, // onlanguagechange
+	0x70:  0x31f05, // thead
+	0x71:  0x15c02, // h1
+	0x72:  0x5e906, // srcdoc
+	0x75:  0x9604,  // name
+	0x76:  0x19106, // button
+	0x77:  0x55504, // desc
+	0x78:  0x17704, // kind
+	0x79:  0x1bf05, // color
+	0x7c:  0x58e06, // usemap
+	0x7d:  0x30e08, // itemtype
+	0x7f:  0x6d508, // manifest
+	0x81:  0x5300c, // onmousewheel
+	0x82:  0x4dc0b, // onmousedown
+	0x84:  0xc05,   // param
+	0x85:  0x2e005, // video
+	0x86:  0x4910c, // onloadeddata
+	0x87:  0x6f107, // address
+	0x8c:  0xef04,  // ping
+	0x8d:  0x24703, // for
+	0x8f:  0x62f08, // onselect
+	0x90:  0x30703, // map
+	0x92:  0xc01,   // p
+	0x93:  0x8008,  // reversed
+	0x94:  0x54d0a, // onpagehide
+	0x95:  0x3206,  // keygen
+	0x96:  0x34109, // minlength
+	0x97:  0x3e40a, // ondragover
+	0x98:  0x42407, // onerror
+	0x9a:  0x2107,  // charset
+	0x9b:  0x29b06, // method
+	0x9c:  0x101,   // b
+	0x9d:  0x68208, // ontoggle
+	0x9e:  0x2bd06, // hidden
+	0xa0:  0x3f607, // article
+	0xa2:  0x63906, // onshow
+	0xa3:  0x64d06, // onsort
+	0xa5:  0x57b0f, // contenteditable
+	0xa6:  0x66908, // onsubmit
+	0xa8:  0x44f09, // oninvalid
+	0xaa:  0x202,   // br
+	0xab:  0x10902, // id
+	0xac:  0x5d04,  // loop
+	0xad:  0x5630a, // onpageshow
+	0xb0:  0x2cf04, // href
+	0xb2:  0x2210a, // figcaption
+	0xb3:  0x2690e, // onautocomplete
+	0xb4:  0x49106, // onload
+	0xb6:  0x9c04,  // rows
+	0xb7:  0x1a605, // nonce
+	0xb8:  0x68a14, // onunhandledrejection
+	0xbb:  0x21306, // center
+	0xbc:  0x59406, // onplay
+	0xbd:  0x33f02, // h5
+	0xbe:  0x49d07, // listing
+	0xbf:  0x57606, // public
+	0xc2:  0x23b06, // figure
+	0xc3:  0x57a04, // icon
+	0xc4:  0x1ab03, // col
+	0xc5:  0x47b03, // rel
+	0xc6:  0xe605,  // media
+	0xc7:  0x12109, // autofocus
+	0xc8:  0x19a02, // rt
+	0xca:  0x2d304, // lang
+	0xcc:  0x49908, // datalist
+	0xce:  0x2eb06, // iframe
+	0xcf:  0x36105, // muted
+	0xd0:  0x6140a, // onauxclick
+	0xd2:  0x3c02,  // as
+	0xd6:  0x3fd06, // ondrop
+	0xd7:  0x1c90a, // annotation
+	0xd8:  0x21908, // fieldset
+	0xdb:  0x2cf08, // hreflang
+	0xdc:  0x4e70c, // onmouseenter
+	0xdd:  0x2a402, // mn
+	0xde:  0xe60a,  // mediagroup
+	0xdf:  0x9805,  // meter
+	0xe0:  0x56c03, // wbr
+	0xe2:  0x63e05, // width
+	0xe3:  0x2290c, // onafterprint
+	0xe4:  0x30505, // ismap
+	0xe5:  0x1505,  // value
+	0xe7:  0x1303,  // nav
+	0xe8:  0x54508, // ononline
+	0xe9:  0xb604,  // mark
+	0xea:  0xc303,  // low
+	0xeb:  0x3ee0b, // ondragstart
+	0xef:  0x12f03, // xmp
+	0xf0:  0x22407, // caption
+	0xf1:  0xd904,  // type
+	0xf2:  0x70907, // summary
+	0xf3:  0x6802,  // tt
+	0xf4:  0x20809, // translate
+	0xf5:  0x1870a, // blockquote
+	0xf8:  0x15702, // hr
+	0xfa:  0x2705,  // tbody
+	0xfc:  0x7b07,  // picture
+	0xfd:  0x5206,  // height
+	0xfe:  0x19c04, // cite
+	0xff:  0x2501,  // s
+	0x101: 0xff05,  // async
+	0x102: 0x56f07, // onpaste
+	0x103: 0x19507, // onabort
+	0x104: 0x2b706, // target
+	0x105: 0x14b03, // bdo
+	0x106: 0x1f006, // coords
+	0x107: 0x5e108, // onresize
+	0x108: 0x71908, // template
+	0x10a: 0x3a02,  // rb
+	0x10b: 0x2a50a, // novalidate
+	0x10c: 0x460e,  // updateviacache
+	0x10d: 0x71003, // sup
+	0x10e: 0x6c07,  // noembed
+	0x10f: 0x16b03, // div
+	0x110: 0x6f707, // srclang
+	0x111: 0x17a09, // draggable
+	0x112: 0x67305, // scope
+	0x113: 0x5905,  // label
+	0x114: 0x22f02, // rp
+	0x115: 0x23f08, // required
+	0x116: 0x3780d, // oncontextmenu
+	0x117: 0x5e504, // size
+	0x118: 0x5b00a, // spellcheck
+	0x119: 0x3f04,  // font
+	0x11a: 0x9c07,  // rowspan
+	0x11b: 0x10a07, // default
+	0x11d: 0x44307, // oninput
+	0x11e: 0x38506, // itemid
+	0x11f: 0x5ee04, // code
+	0x120: 0xaa07,  // acronym
+	0x121: 0x3b04,  // base
+	0x125: 0x2470d, // foreignObject
+	0x126: 0x2ca04, // high
+	0x127: 0x3cb0e, // referrerpolicy
+	0x128: 0x33703, // max
+	0x129: 0x59d0a, // onpopstate
+	0x12a: 0x2fc02, // h4
+	0x12b: 0x4ac04, // meta
+	0x12c: 0x17305, // blink
+	0x12e: 0x5f508, // onscroll
+	0x12f: 0x59409, // onplaying
+	0x130: 0xc113,  // allowpaymentrequest
+	0x131: 0x19a03, // rtc
+	0x132: 0x72b04, // wrap
+	0x134: 0x8b08,  // frameset
+	0x135: 0x32605, // small
+	0x137: 0x32006, // header
+	0x138: 0x40409, // onemptied
+	0x139: 0x34902, // h6
+	0x13a: 0x35908, // multiple
+	0x13c: 0x52a06, // prompt
+	0x13f: 0x28e09, // challenge
+	0x141: 0x4370c, // onhashchange
+	0x142: 0x57b07, // content
+	0x143: 0x1c90e, // annotation-xml
+	0x144: 0x36607, // onclose
+	0x145: 0x14d10, // oncanplaythrough
+	0x148: 0x5170b, // onmouseover
+	0x149: 0x64f08, // sortable
+	0x14a: 0xa402,  // mo
+	0x14b: 0x2cd02, // h3
+	0x14c: 0x2c406, // script
+	0x14d: 0x41d07, // onended
+	0x14f: 0x64706, // poster
+	0x150: 0x7210a, // workertype
+	0x153: 0x1f505, // shape
+	0x154: 0x4,     // abbr
+	0x155: 0x1,     // a
+	0x156: 0x2bf02, // dd
+	0x157: 0x71606, // system
+	0x158: 0x4ce0e, // onmessageerror
+	0x159: 0x36b08, // seamless
+	0x15a: 0x2610a, // formaction
+	0x15b: 0x6e106, // option
+	0x15c: 0x31d04, // math
+	0x15d: 0x62609, // onseeking
+	0x15e: 0x39c05, // oncut
+	0x15f: 0x44c03, // del
+	0x160: 0x11005, // title
+	0x161: 0x11505, // audio
+	0x162: 0x63108, // selected
+	0x165: 0x3b40b, // ondragenter
+	0x166: 0x46e06, // spacer
+	0x167: 0x4a410, // onloadedmetadata
+	0x168: 0x44505, // input
+	0x16a: 0x58505, // table
+	0x16b: 0x41508, // onchange
+	0x16e: 0x5f005, // defer
+	0x171: 0x50a0a, // onmouseout
+	0x172: 0x20504, // slot
+	0x175: 0x3704,  // nobr
+	0x177: 0x1d707, // command
+	0x17a: 0x7207,  // details
+	0x17b: 0x38104, // menu
+	0x17c: 0xb903,  // kbd
+	0x17d: 0x57304, // step
+	0x17e: 0x20303, // ins
+	0x17f: 0x13c08, // autoplay
+	0x182: 0x34103, // min
+	0x183: 0x17404, // link
+	0x185: 0x40d10, // ondurationchange
+	0x186: 0x9202,  // td
+	0x187: 0x8b05,  // frame
+	0x18a: 0x2ab08, // datetime
+	0x18b: 0x44509, // inputmode
+	0x18c: 0x35108, // readonly
+	0x18d: 0x21104, // face
+	0x18f: 0x5e505, // sizes
+	0x191: 0x4b208, // tabindex
+	0x192: 0x6db06, // strong
+	0x193: 0xba03,  // bdi
+	0x194: 0x6fe06, // srcset
+	0x196: 0x67202, // ms
+	0x197: 0x5b507, // checked
+	0x198: 0xb105,  // align
+	0x199: 0x1e507, // section
+	0x19b: 0x6e05,  // embed
+	0x19d: 0x15e07, // bgsound
+	0x1a2: 0x49d04, // list
+	0x1a3: 0x61e08, // onseeked
+	0x1a4: 0x66009, // onstorage
+	0x1a5: 0x2f603, // img
+	0x1a6: 0xf505,  // tfoot
+	0x1a9: 0x26913, // onautocompleteerror
+	0x1aa: 0x5fd19, // onsecuritypolicyviolation
+	0x1ad: 0x9303,  // dir
+	0x1ae: 0x9307,  // dirname
+	0x1b0: 0x5a70a, // onprogress
+	0x1b2: 0x65709, // onstalled
+	0x1b5: 0x66f09, // itemscope
+	0x1b6: 0x49904, // data
+	0x1b7: 0x3d90b, // ondragleave
+	0x1b8: 0x56102, // h2
+	0x1b9: 0x2f706, // mglyph
+	0x1ba: 0x16502, // is
+	0x1bb: 0x6e50e, // onbeforeunload
+	0x1bc: 0x2830d, // typemustmatch
+	0x1bd: 0x3ab06, // ondrag
+	0x1be: 0x5da07, // onreset
+	0x1c0: 0x51106, // output
+	0x1c1: 0x12907, // sandbox
+	0x1c2: 0x1b209, // plaintext
+	0x1c4: 0x34c08, // textarea
+	0x1c7: 0xd607,  // keytype
+	0x1c8: 0x34b05, // mtext
+	0x1c9: 0x6b10e, // onvolumechange
+	0x1ca: 0x1ea06, // onblur
+	0x1cb: 0x58a07, // onpause
+	0x1cd: 0x5bc0c, // onratechange
+	0x1ce: 0x10705, // aside
+	0x1cf: 0x6cf07, // optimum
+	0x1d1: 0x45809, // onkeydown
+	0x1d2: 0x1c407, // colspan
+	0x1d3: 0x1004,  // main
+	0x1d4: 0x66b03, // sub
+	0x1d5: 0x25b06, // object
+	0x1d6: 0x55c06, // search
+	0x1d7: 0x37206, // sorted
+	0x1d8: 0x17003, // big
+	0x1d9: 0xb01,   // u
+	0x1db: 0x26b0c, // autocomplete
+	0x1dc: 0xcc02,  // tr
+	0x1dd: 0xf303,  // alt
+	0x1df: 0x7804,  // samp
+	0x1e0: 0x5c812, // onrejectionhandled
+	0x1e1: 0x4f30c, // onmouseleave
+	0x1e2: 0x28007, // enctype
+	0x1e3: 0xa208,  // nomodule
+	0x1e5: 0x3280f, // allowfullscreen
+	0x1e6: 0x5f08,  // optgroup
+	0x1e8: 0x27c0b, // formenctype
+	0x1e9: 0x18106, // legend
+	0x1ea: 0x10306, // canvas
+	0x1eb: 0x6607,  // pattern
+	0x1ec: 0x2c208, // noscript
+	0x1ed: 0x601,   // i
+	0x1ee: 0x5d602, // dl
+	0x1ef: 0xa702,  // ul
+	0x1f2: 0x52209, // onmouseup
+	0x1f4: 0x1ba05, // track
+	0x1f7: 0x3a10a, // ondblclick
+	0x1f8: 0x3bf0a, // ondragexit
+	0x1fa: 0x8703,  // dfn
+	0x1fc: 0x26506, // action
+	0x1fd: 0x35004, // area
+	0x1fe: 0x31607, // marquee
+	0x1ff: 0x16d03, // var
 }
 
 const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
@@ -758,26 +760,26 @@ const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb"
 	"dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
 	"bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
 	"penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
-	"ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
-	"ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
-	"ignObjectforeignobjectformactionautocompleteerrorformenctype" +
-	"mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
-	"osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
-	"ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
-	"inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
-	"extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
-	"enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
-	"articleondropzonemptiedondurationchangeonendedonerroronfocus" +
-	"paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
-	"spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
-	"onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
-	"usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
-	"seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
-	"classectionbluronpageshowbronpastepublicontenteditableonpaus" +
-	"emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
-	"jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
-	"violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
-	"tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
-	"handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
-	"wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
-	"arysupsvgsystemplateworkertypewrap"
+	"ntrolsectionblurcoordshapecrossoriginslotranslatefacenterfie" +
+	"ldsetfigcaptionafterprintegrityfigurequiredforeignObjectfore" +
+	"ignobjectformactionautocompleteerrorformenctypemustmatchalle" +
+	"ngeformmethodformnovalidatetimeformtargethiddenoscripthigh3h" +
+	"reflanghttp-equivideonclickiframeimageimglyph4isindexismappl" +
+	"etitemtypemarqueematheadersmallowfullscreenmaxlength5minleng" +
+	"th6mtextareadonlymultiplemutedoncloseamlessortedoncontextmen" +
+	"uitemidoncopyoncuechangeoncutondblclickondragendondragentero" +
+	"ndragexitemreferrerpolicyondragleaveondragoverondragstarticl" +
+	"eondropzonemptiedondurationchangeonendedonerroronfocusourceo" +
+	"nhashchangeoninputmodeloninvalidonkeydownloadonkeypresspacer" +
+	"onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" +
+	"databindexonloadendonloadstartonmessageerroronmousedownonmou" +
+	"seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" +
+	"eupromptonmousewheelonofflineononlineonpagehidesclassearch2o" +
+	"npageshowbronpastepublicontenteditableonpausemaponplayingonp" +
+	"opstateonprogresspellcheckedonratechangeonrejectionhandledon" +
+	"resetonresizesrcdocodeferonscrollonsecuritypolicyviolationau" +
+	"xclickonseekedonseekingonselectedonshowidthgrouposteronsorta" +
+	"bleonstalledonstorageonsubmitemscopedonsuspendontoggleonunha" +
+	"ndledrejectionbeforeprintonunloadonvolumechangeonwaitingonwh" +
+	"eeloptimumanifestrongoptionbeforeunloaddressrclangsrcsetstyl" +
+	"esummarysupsvgsystemplateworkertypewrap"
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index 643c674e37802d509cb5806cff26a0b0be4ab603..518ee4c94e7498da110263ef680b7c4adae8f024 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -924,7 +924,7 @@ func inBodyIM(p *parser) bool {
 			p.addElement()
 			p.im = inFramesetIM
 			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Search, a.Section, a.Summary, a.Ul:
 			p.popUntil(buttonScope, a.P)
 			p.addElement()
 		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
@@ -1136,7 +1136,7 @@ func inBodyIM(p *parser) bool {
 				return false
 			}
 			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Search, a.Section, a.Summary, a.Ul:
 			p.popUntil(defaultScope, p.tok.DataAtom)
 		case a.Form:
 			if p.oe.contains(a.Template) {
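The two a.Search additions above wire the <search> element (added to the WHATWG HTML spec in 2023) into inBodyIM, so that, like other grouping elements, an opening <search> tag implicitly closes an open <p>, and its end tag pops the element from scope. A minimal, purely illustrative check against the patched parser:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// With a.Search in the inBodyIM lists, <search> closes the open <p>
	// instead of being nested inside it.
	doc, err := html.Parse(strings.NewReader("<p>one<search>two</search>"))
	if err != nil {
		panic(err)
	}
	var walk func(n *html.Node, depth int)
	walk = func(n *html.Node, depth int) {
		if n.Type == html.ElementNode {
			fmt.Printf("%*s<%s>\n", depth*2, "", n.Data)
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c, depth+1)
		}
	}
	walk(doc, 0) // prints <p> and <search> as siblings under <body>
}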
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index 3c57880d6979ace65723338f6a0afcde5c260b2a..6598c1f7b320fbb19ebf73af7e4c752a4f8ba508 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -839,8 +839,22 @@ func (z *Tokenizer) readStartTag() TokenType {
 	if raw {
 		z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
 	}
-	// Look for a self-closing token like "<br/>".
-	if z.err == nil && z.buf[z.raw.end-2] == '/' {
+	// Look for a self-closing token (e.g. <br/>).
+	//
+	// Originally, we did this by just checking that the last character of the
+	// tag (ignoring the closing bracket) was a solidus (/) character, but that
+	// is not always accurate.
+	//
+	// We need to be careful not to misinterpret a non-self-closing tag as
+	// self-closing, as can happen if the tag contains unquoted attribute
+	// values (e.g. <p a=/>).
+	//
+	// To avoid this, we check that the last non-bracket character of the tag
+	// (z.raw.end-2) isn't the same byte as the final character of the tag's
+	// last attribute value (z.attr[len(z.attr)-1][1].end-1), if the tag has
+	// attributes.
+	nAttrs := len(z.attr)
+	if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) {
 		return SelfClosingTagToken
 	}
 	return StartTagToken
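As a quick illustration of the tokenizer fix above: a trailing solidus that is really the last byte of an unquoted attribute value no longer makes the tag self-closing. A hypothetical snippet run against the patched package:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	for _, s := range []string{"<br/>", "<p a=/>"} {
		z := html.NewTokenizer(strings.NewReader(s))
		// With the fix: <br/> -> SelfClosingTag, <p a=/> -> StartTag
		// (previously both tokenized as SelfClosingTag).
		fmt.Printf("%-8s -> %v\n", s, z.Next())
	}
}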
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 81faec7e75d60f1854c9046cfcfd94cad3895664..97bd8b06f7ac111c72f3dac267d2b093bd5bd0ed 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -225,6 +225,11 @@ var fhBytes = sync.Pool{
 	},
 }
 
+func invalidHTTP1LookingFrameHeader() FrameHeader {
+	fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 "))
+	return fh
+}
+
 // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
 // Most users should use Framer.ReadFrame instead.
 func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
@@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 		return nil, err
 	}
 	if fh.Length > fr.maxReadSize {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+		}
 		return nil, ErrFrameTooLarge
 	}
 	payload := fr.getReadBuf(fh.Length)
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+		}
 		return nil, err
 	}
 	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
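Why comparing against invalidHTTP1LookingFrameHeader works: an HTTP/2 frame header is nine bytes (24-bit length, 8-bit type, 8-bit flags, 31-bit stream ID), so the first nine bytes of an HTTP/1.x status line, "HTTP/1.1 ", always decode to the same nonsense header, which ReadFrame can test for with a simple equality check. A small sketch of that decoding, illustrative only:

package main

import "fmt"

func main() {
	b := []byte("HTTP/1.1 ") // first 9 bytes of an HTTP/1.x status line
	length := uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])
	ftype, flags := b[3], b[4]
	stream := (uint32(b[5])<<24 | uint32(b[6])<<16 | uint32(b[7])<<8 | uint32(b[8])) &^ (1 << 31)
	// Prints: length=4740180 type=0x50 flags=0x2f stream=825110816
	// That ~4.5 MB "frame" far exceeds the default 16 KB read limit, so
	// ReadFrame fails fast and can attach the HTTP/1.1 hint to the error.
	fmt.Printf("length=%d type=0x%x flags=0x%x stream=%d\n", length, ftype, flags, stream)
}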
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b640deb0e0aac7ad4041131e44388cbac5dfe946..51fca38f61d726e0b45bd6cd9f56f21f48c8a2ca 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) {
 
 func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
 	if sc.pingSent {
-		sc.vlogf("timeout waiting for PING response")
+		sc.logf("timeout waiting for PING response")
+		if f := sc.countErrorFunc; f != nil {
+			f("conn_close_lost_ping")
+		}
 		sc.conn.Close()
 		return
 	}
diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children
index 08261bffd196fd6942b4cebb5ff06e0ffe53808d..986a246a6c05ca5b49c1d9f3f7fc84cb6628b64e 100644
Binary files a/vendor/golang.org/x/net/publicsuffix/data/children and b/vendor/golang.org/x/net/publicsuffix/data/children differ
diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes
index 1dae6ede8f292889cb4252aa473312fea1bac46a..38b8999600c8cb35fbceee1b938b5de72da1345f 100644
Binary files a/vendor/golang.org/x/net/publicsuffix/data/nodes and b/vendor/golang.org/x/net/publicsuffix/data/nodes differ
diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text
index 7e516413f6ca03485188d994edb1492d9d0c2af5..b151d97de27601f65ad677921bbfc136bf86f3a4 100644
--- a/vendor/golang.org/x/net/publicsuffix/data/text
+++ b/vendor/golang.org/x/net/publicsuffix/data/text
@@ -1 +1 @@
-birkenesoddtangentinglogoweirbitbucketrzynishikatakayamatta-varjjatjomembersaltdalovepopartysfjordiskussionsbereichatinhlfanishikatsuragitappassenger-associationishikawazukamiokameokamakurazakitaurayasudabitternidisrechtrainingloomy-routerbjarkoybjerkreimdbalsan-suedtirololitapunkapsienamsskoganeibmdeveloperauniteroirmemorialombardiadempresashibetsukumiyamagasakinderoyonagunicloudevelopmentaxiijimarriottayninhaccanthobby-siteval-d-aosta-valleyoriikaracolognebinatsukigataiwanumatajimidsundgcahcesuolocustomer-ocimperiautoscanalytics-gatewayonagoyaveroykenflfanpachihayaakasakawaiishopitsitemasekd1kappenginedre-eikerimo-siemenscaledekaascolipicenoboribetsucks3-eu-west-3utilities-16-balestrandabergentappsseekloges3-eu-west-123paginawebcamauction-acornfshostrodawaraktyubinskaunicommbank123kotisivultrobjectselinogradimo-i-rana4u2-localhostrolekanieruchomoscientistordal-o-g-i-nikolaevents3-ap-northeast-2-ddnsking123homepagefrontappchizip61123saitamakawababia-goracleaningheannakadomarineat-urlimanowarudakuneustarostwodzislawdev-myqnapcloudcontrolledgesuite-stagingdyniamusementdllclstagehirnikonantomobelementorayokosukanoyakumoliserniaurland-4-salernord-aurdalipaywhirlimiteddnslivelanddnss3-ap-south-123siteweberlevagangaviikanonji234lima-cityeats3-ap-southeast-123webseiteambulancechireadmyblogspotaribeiraogakicks-assurfakefurniturealmpmninoheguribigawaurskog-holandinggfarsundds3-ap-southeast-20001wwwedeployokote123hjemmesidealerdalaheadjuegoshikibichuobiraustevollimombetsupplyokoze164-balena-devices3-ca-central-123websiteleaf-south-12hparliamentatsunobninsk8s3-eu-central-1337bjugnishimerablackfridaynightjxn--11b4c3ditchyouripatriabloombergretaijindustriesteinkjerbloxcmsaludivtasvuodnakaiwanairlinekobayashimodatecnologiablushakotanishinomiyashironomniwebview-assetsalvadorbmoattachmentsamegawabmsamnangerbmwellbeingzonebnrweatherchannelsdvrdnsamparalleluxenishinoomotegotsukishiwadavvenjargamvikarpaczest-a-la-maisondre-landivttasvuotnakamai-stagingloppennebomlocalzonebonavstackartuzybondigitaloceanspacesamsclubartowest1-usamsunglugsmall-webspacebookonlineboomlaakesvuemielecceboschristmasakilatiron-riopretoeidsvollovesickaruizawabostik-serverrankoshigayachtsandvikcoromantovalle-d-aostakinouebostonakijinsekikogentlentapisa-geekarumaifmemsetkmaxxn--12c1fe0bradescotksatmpaviancapitalonebouncemerckmsdscloudiybounty-fullensakerrypropertiesangovtoyosatoyokawaboutiquebecologialaichaugiangmbhartiengiangminakamichiharaboutireservdrangedalpusercontentoyotapfizerboyfriendoftheinternetflixn--12cfi8ixb8lublindesnesanjosoyrovnoticiasannanishinoshimattelemarkasaokamikitayamatsurinfinitigopocznore-og-uvdalucaniabozen-sudtiroluccanva-appstmnishiokoppegardray-dnsupdaterbozen-suedtirolukowesteuropencraftoyotomiyazakinsurealtypeformesswithdnsannohekinanporovigonohejinternationaluroybplacedogawarabikomaezakirunordkappgfoggiabrandrayddns5ybrasiliadboxoslockerbresciaogashimadachicappadovaapstemp-dnswatchest-mon-blogueurodirumagazinebrindisiciliabroadwaybroke-itvedestrandraydnsanokashibatakashimashikiyosatokigawabrokerbrothermesserlifestylebtimnetzpisdnpharmaciensantamariakebrowsersafetymarketingmodumetacentrumeteorappharmacymruovatlassian-dev-builderschaefflerbrumunddalutskashiharabrusselsantoandreclaimsanukintlon-2bryanskiptveterinaireadthedocsaobernardovre-eikerbrynebwestus2bzhitomirbzzwhitesnowflakecommunity-prochowicecomodalenissandoycompanyaarphdfcbankasumigaurawa-mazowszexn--1ck2e1bambinagisobetsuldalpha-myqnapcloudaccess3-us-east-2ixboxeroxfinityolasiteastus2comparemarkerryhotelsaves-the-whalessandria-trani-barletta-and
riatranibarlettaandriacomsecaasnesoddeno-stagingrondarcondoshifteditorxn--1ctwolominamatarnobrzegrongrossetouchijiwadedyn-berlincolnissayokoshibahikariyaltakazakinzais-a-bookkeepermarshallstatebankasuyalibabahccavuotnagaraholtaleniwaizumiotsurugashimaintenanceomutazasavonarviikaminoyamaxunispaceconferenceconstructionflashdrivefsncf-ipfsaxoconsuladobeio-static-accesscamdvrcampaniaconsultantranoyconsultingroundhandlingroznysaitohnoshookuwanakayamangyshlakdnepropetrovskanlandyndns-freeboxostrowwlkpmgrphilipsyno-dschokokekscholarshipschoolbusinessebycontactivetrailcontagematsubaravendbambleborkdalvdalcest-le-patron-rancherkasydneyukuhashimokawavoues3-sa-east-1contractorskenissedalcookingruecoolblogdnsfor-better-thanhhoarairforcentralus-1cooperativano-frankivskodjeephonefosschoolsztynsetransiphotographysiocoproductionschulplattforminamiechizenisshingucciprianiigatairaumalatvuopmicrolightinguidefinimaringatlancastercorsicafjschulservercosenzakopanecosidnshome-webservercellikescandypopensocialcouchpotatofrieschwarzgwangjuh-ohtawaramotoineppueblockbusternopilawacouncilcouponscrapper-sitecozoravennaharimalborkaszubytemarketscrappinguitarscrysecretrosnubananarepublic-inquiryurihonjoyenthickaragandaxarnetbankanzakiwielunnerepairbusanagochigasakishimabarakawaharaolbia-tempio-olbiatempioolbialowiezachpomorskiengiangjesdalolipopmcdirepbodyn53cqcxn--1lqs03niyodogawacrankyotobetsumidaknongujaratmallcrdyndns-homednscwhminamifuranocreditcardyndns-iphutholdingservehttpbincheonl-ams-1creditunionionjukujitawaravpagecremonashorokanaiecrewhoswholidaycricketnedalcrimeast-kazakhstanangercrotonecrowniphuyencrsvp4cruiseservehumourcuisinellair-traffic-controllagdenesnaaseinet-freakserveircasertainaircraftingvolloansnasaarlanduponthewifidelitypedreamhostersaotomeldaluxurycuneocupcakecuritibacgiangiangryggeecurvalled-aostargets-itranslatedyndns-mailcutegirlfriendyndns-office-on-the-webhoptogurafedoraprojectransurlfeirafembetsukuis-a-bruinsfanfermodenakasatsunairportrapaniizaferraraferraris-a-bulls-fanferrerotikagoshimalopolskanittedalfetsundyndns-wikimobetsumitakagildeskaliszkolamericanfamilydservemp3fgunmaniwamannorth-kazakhstanfhvalerfilegear-augustowiiheyakagefilegear-deatnuniversitysvardofilegear-gbizfilegear-iefilegear-jpmorgangwonporterfilegear-sg-1filminamiizukamiminefinalchikugokasellfyis-a-candidatefinancefinnoyfirebaseappiemontefirenetlifylkesbiblackbaudcdn-edgestackhero-networkinggroupowiathletajimabaria-vungtaudiopsysharpigboatshawilliamhillfirenzefirestonefireweblikes-piedmontravelersinsurancefirmdalegalleryfishingoldpoint2thisamitsukefitjarfitnessettsurugiminamimakis-a-catererfjalerfkatsushikabeebyteappilottonsberguovdageaidnunjargausdalflekkefjordyndns-workservep2phxn--1lqs71dyndns-remotewdyndns-picserveminecraftransporteflesbergushikamifuranorthflankatsuyamashikokuchuoflickragerokunohealthcareershellflierneflirfloginlinefloppythonanywherealtorfloraflorencefloripalmasfjordenfloristanohatajiris-a-celticsfanfloromskogxn--2m4a15eflowershimokitayamafltravinhlonganflynnhosting-clusterfncashgabadaddjabbottoyourafndyndns1fnwkzfolldalfoolfor-ourfor-somegurownproviderfor-theaterfordebianforexrotheworkpccwinbar0emmafann-arborlandd-dnsiskinkyowariasahikawarszawashtenawsmppl-wawsglobalacceleratorahimeshimakanegasakievennodebalancern4t3l3p0rtatarantours3-ap-northeast-123minsidaarborteaches-yogano-ipifony-123miwebaccelastx4432-b-datacenterprisesakijobservableusercontentateshinanomachintaifun-dnsdojournalistoloseyouriparisor-fronavuotnarashinoharaetnabudejjunipereggio-emilia-romagnaroyboltateyamajureggiocalabriakrehamna
yoro0o0forgotdnshimonitayanagithubpreviewsaikisarazure-mobileirfjordynnservepicservequakeforli-cesena-forlicesenaforlillehammerfeste-ipimientaketomisatoolshimonosekikawaforsalegoismailillesandefjordynservebbservesarcasmileforsandasuolodingenfortalfortefosneshimosuwalkis-a-chefashionstorebaseljordyndns-serverisignfotrdynulvikatowicefoxn--2scrj9casinordlandurbanamexnetgamersapporomurafozfr-1fr-par-1fr-par-2franamizuhoboleslawiecommerce-shoppingyeongnamdinhachijohanamakisofukushimaoris-a-conservativegarsheiheijis-a-cparachutingfredrikstadynv6freedesktopazimuthaibinhphuocelotenkawakayamagnetcieszynh-servebeero-stageiseiroumugifuchungbukharag-cloud-championshiphoplixn--30rr7yfreemyiphosteurovisionredumbrellangevagrigentobishimadridvagsoygardenebakkeshibechambagricoharugbydgoszczecin-berlindasdaburfreesitefreetlshimotsukefreisennankokubunjis-a-cubicle-slavellinodeobjectshimotsumafrenchkisshikindleikangerfreseniushinichinanfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganshinjotelulubin-vpncateringebunkyonanaoshimamateramockashiwarafrognfrolandynvpnpluservicesevastopolitiendafrom-akamaized-stagingfrom-alfrom-arfrom-azurewebsiteshikagamiishibuyabukihokuizumobaragusabaerobaticketshinjukuleuvenicefrom-campobassociatest-iserveblogsytenrissadistdlibestadultrentin-sudtirolfrom-coachaseljeducationcillahppiacenzaganfrom-ctrentin-sued-tirolfrom-dcatfooddagestangefrom-decagliarikuzentakataikillfrom-flapymntrentin-suedtirolfrom-gap-east-1from-higashiagatsumagoianiafrom-iafrom-idyroyrvikingulenfrom-ilfrom-in-the-bandairtelebitbridgestonemurorangecloudplatform0from-kshinkamigototalfrom-kyfrom-langsonyantakahamalselveruminamiminowafrom-malvikaufentigerfrom-mdfrom-mein-vigorlicefrom-mifunefrom-mnfrom-modshinshinotsurgeryfrom-mshinshirofrom-mtnfrom-ncatholicurus-4from-ndfrom-nefrom-nhs-heilbronnoysundfrom-njshintokushimafrom-nminamioguni5from-nvalledaostargithubusercontentrentino-a-adigefrom-nycaxiaskvollpagesardegnarutolgaulardalvivanovoldafrom-ohdancefrom-okegawassamukawataris-a-democratrentino-aadigefrom-orfrom-panasonichernovtsykkylvenneslaskerrylogisticsardiniafrom-pratohmamurogawatsonrenderfrom-ris-a-designerimarugame-hostyhostingfrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--32vp30hachinoheavyfrom-utsiracusagaeroclubmedecin-addrammenuorodoyerfrom-val-daostavalleyfrom-vtrentino-alto-adigefrom-wafrom-wiardwebthingsjcbnpparibashkiriafrom-wvallee-aosteroyfrom-wyfrosinonefrostabackplaneapplebesbyengerdalp1froyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairtrafficplexus-2fujinomiyadapliefujiokazakinkobearalvahkikonaibetsubame-south-1fujisatoshoeshintomikasaharafujisawafujishiroishidakabiratoridediboxn--3bst00minamisanrikubetsupportrentino-altoadigefujitsuruokakamigaharafujiyoshidappnodearthainguyenfukayabeardubaikawagoefukuchiyamadatsunanjoburgfukudomigawafukuis-a-doctorfukumitsubishigakirkeneshinyoshitomiokamisatokamachippubetsuikitchenfukuokakegawafukuroishikariwakunigamigrationfukusakirovogradoyfukuyamagatakaharunusualpersonfunabashiriuchinadattorelayfunagatakahashimamakiryuohkurafunahashikamiamakusatsumasendaisenergyeongginowaniihamatamakinoharafundfunkfeuerfuoiskujukuriyamandalfuosskoczowindowskrakowinefurubirafurudonordreisa-hockeynutwentertainmentrentino-s-tirolfurukawajimangolffanshiojirishirifujiedafusoctrangfussagamiharafutabayamaguchinomihachimanagementrentino-stirolfutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinais-a-fi
nancialadvisor-aurdalfuturecmshioyamelhushirahamatonbetsurnadalfuturehostingfuturemailingfvghakuis-a-gurunzenhakusandnessjoenhaldenhalfmoonscalebookinghostedpictetrentino-sud-tirolhalsakakinokiaham-radio-opinbar1hamburghammarfeastasiahamurakamigoris-a-hard-workershiraokamisunagawahanamigawahanawahandavvesiidanangodaddyn-o-saurealestatefarmerseinehandcrafteducatorprojectrentino-sudtirolhangglidinghangoutrentino-sued-tirolhannannestadhannosegawahanoipinkazohanyuzenhappouzshiratakahagianghasamap-northeast-3hasaminami-alpshishikuis-a-hunterhashbanghasudazaifudaigodogadobeioruntimedio-campidano-mediocampidanomediohasura-appinokokamikoaniikappudopaashisogndalhasvikazteleportrentino-suedtirolhatogayahoooshikamagayaitakamoriokakudamatsuehatoyamazakitahiroshimarcheapartmentshisuifuettertdasnetzhatsukaichikaiseiyoichipshitaramahattfjelldalhayashimamotobusells-for-lesshizukuishimoichilloutsystemscloudsitehazuminobushibukawahelplfinancialhelsinkitakamiizumisanofidonnakamurataitogliattinnhemneshizuokamitondabayashiogamagoriziahemsedalhepforgeblockshoujis-a-knightpointtokaizukamaishikshacknetrentinoa-adigehetemlbfanhigashichichibuzentsujiiehigashihiroshimanehigashiizumozakitakatakanabeautychyattorneyagawakkanaioirasebastopoleangaviikadenagahamaroyhigashikagawahigashikagurasoedahigashikawakitaaikitakyushunantankazunovecorebungoonow-dnshowahigashikurumeinforumzhigashimatsushimarnardalhigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshowtimeloyhigashinarusells-for-uhigashinehigashiomitamanoshiroomghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitamihamadahigashitsunospamproxyhigashiurausukitamotosunnydayhigashiyamatokoriyamanashiibaclieu-1higashiyodogawahigashiyoshinogaris-a-landscaperspectakasakitanakagusukumoldeliveryhippyhiraizumisatohokkaidontexistmein-iservschulecznakaniikawatanagurahirakatashinagawahiranais-a-lawyerhirarahiratsukaeruhirayaizuwakamatsubushikusakadogawahitachiomiyaginozawaonsensiositehitachiotaketakaokalmykiahitraeumtgeradegreehjartdalhjelmelandholyhomegoodshwinnersiiitesilkddiamondsimple-urlhomeipioneerhomelinkyard-cloudjiffyresdalhomelinuxn--3ds443ghomeofficehomesecuritymacaparecidahomesecuritypchiryukyuragiizehomesenseeringhomeskleppippugliahomeunixn--3e0b707ehondahonjyoitakarazukaluganskfh-muensterhornindalhorsells-itrentinoaadigehortendofinternet-dnsimplesitehospitalhotelwithflightsirdalhotmailhoyangerhoylandetakasagooglecodespotrentinoalto-adigehungyenhurdalhurumajis-a-liberalhyllestadhyogoris-a-libertarianhyugawarahyundaiwafuneis-very-evillasalleitungsenis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandoomdnstraceisk01isk02jenv-arubacninhbinhdinhktistoryjeonnamegawajetztrentinostiroljevnakerjewelryjgorajlljls-sto1jls-sto2jls-sto3jmpixolinodeusercontentrentinosud-tiroljnjcloud-ver-jpchitosetogitsuliguriajoyokaichibahcavuotnagaivuotnagaokakyotambabymilk3jozis-a-musicianjpnjprsolarvikhersonlanxessolundbeckhmelnitskiyamasoykosaigawakosakaerodromegalloabatobamaceratachikawafaicloudineencoreapigeekoseis-a-painterhostsolutionslupskhakassiakosheroykoshimizumakis-a-patsfankoshughesomakosugekotohiradomainstitutekotourakouhokumakogenkounosupersalevangerkouyamasudakouzushimatrixn--3pxu8khplaystation-cloudyclusterkozagawakozakis-a-personaltrainerkozowiosomnarviklabudhabikinokawachinaganoharamcocottekpnkppspbarcelonagawakepnord-odalwaysdatabaseballangenkainanaejrietisalatinabenogiehtavuoatnaamesjevuemielnombrendlyngen-rootaruibxos3-us-gov-west-1krasnikahokutokonamegatakatoris-a-photographerokussldkrasnodarkredstonekrelliankristia
nsandcatsoowitdkmpspawnextdirectrentinosudtirolkristiansundkrodsheradkrokstadelvaldaostavangerkropyvnytskyis-a-playershiftcryptonomichinomiyakekryminamiyamashirokawanabelaudnedalnkumamotoyamatsumaebashimofusakatakatsukis-a-republicanonoichinosekigaharakumanowtvaokumatorinokumejimatsumotofukekumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-rockstarachowicekunitachiaraisaijolsterkunitomigusukukis-a-socialistgstagekunneppubtlsopotrentinosued-tirolkuokgroupizzakurgankurobegetmyipirangalluplidlugolekagaminorddalkurogimimozaokinawashirosatochiokinoshimagentositempurlkuroisodegaurakuromatsunais-a-soxfankuronkurotakikawasakis-a-studentalkushirogawakustanais-a-teacherkassyncloudkusuppliesor-odalkutchanelkutnokuzumakis-a-techietipslzkvafjordkvalsundkvamsterdamnserverbaniakvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsor-varangermishimatsusakahogirlymisugitokorozawamitakeharamitourismartlabelingmitoyoakemiuramiyazurecontainerdpoliticaobangmiyotamatsukuris-an-actormjondalenmonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenapolicemoriyamatsuuramoriyoshiminamiashigaramormonstermoroyamatsuzakis-an-actressmushcdn77-sslingmortgagemoscowithgoogleapiszmoseushimogosenmosjoenmoskenesorreisahayakawakamiichikawamisatottoris-an-anarchistjordalshalsenmossortlandmosviknx-serversusakiyosupabaseminemotegit-reposoruminanomoviemovimientokyotangotembaixadattowebhareidsbergmozilla-iotrentinosuedtirolmtranbytomaridagawalmartrentinsud-tirolmuikaminokawanishiaizubangemukoelnmunakatanemuosattemupkomatsushimassa-carrara-massacarraramassabuzzmurmanskomforbar2murotorcraftranakatombetsumy-gatewaymusashinodesakegawamuseumincomcastoripressorfoldmusicapetownnews-stagingmutsuzawamy-vigormy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoundcastorjdevcloudfunctionsokndalmydattolocalcertificationmyddnsgeekgalaxymydissentrentinsudtirolmydobissmarterthanyoumydrobofageometre-experts-comptablesowamydspectruminisitemyeffectrentinsued-tirolmyfastly-edgekey-stagingmyfirewalledreplittlestargardmyforuminterecifedextraspace-to-rentalstomakomaibaramyfritzmyftpaccesspeedpartnermyhome-servermyjinomykolaivencloud66mymailermymediapchoseikarugalsacemyokohamamatsudamypeplatformsharis-an-artistockholmestrandmypetsphinxn--41amyphotoshibajddarvodkafjordvaporcloudmypictureshinomypsxn--42c2d9amysecuritycamerakermyshopblockspjelkavikommunalforbundmyshopifymyspreadshopselectrentinsuedtirolmytabitordermythic-beastspydebergmytis-a-anarchistg-buildermytuleap-partnersquaresindevicenzamyvnchoshichikashukudoyamakeuppermywirecipescaracallypoivronpokerpokrovskommunepolkowicepoltavalle-aostavernpomorzeszowithyoutuberspacekitagawaponpesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-bykleclerchitachinakagawaltervistaipeigersundynamic-dnsarlpordenonepornporsangerporsangugeporsgrunnanpoznanpraxihuanprdprgmrprimetelprincipeprivatelinkomonowruzhgorodeoprivatizehealthinsuranceprofesionalprogressivegasrlpromonza-e-della-brianzaptokuyamatsushigepropertysnesrvarggatrevisogneprotectionprotonetroandindependent-inquest-a-la-masionprudentialpruszkowiwatsukiyonotaireserve-onlineprvcyonabarumbriaprzeworskogpunyufuelpupulawypussycatanzarowixsitepvhachirogatakahatakaishimojis-a-geekautokeinotteroypvtrogstadpwchowderpzqhadanorthwesternmutualqldqotoyohashimotoshimaqponiatowadaqslgbtroitskomorotsukagawaqualifioapplatter-applatterplcube-serverquangngais-certifiedugit-pagespeedmobilizeroticaltanissettailscaleforcequangninhthuanquangtritonoshonais-foundationquickconnectromsakuragawaquicksytestreamlitapplumbingouvaresearchitectesrhtren
toyonakagyokutoyakomakizunokunimimatakasugais-an-engineeringquipelementstrippertuscanytushungrytuvalle-daostamayukis-into-animeiwamizawatuxfamilytuyenquangbinhthuantwmailvestnesuzukis-gonevestre-slidreggio-calabriavestre-totennishiawakuravestvagoyvevelstadvibo-valentiaavibovalentiavideovinhphuchromedicinagatorogerssarufutsunomiyawakasaikaitakokonoevinnicarbonia-iglesias-carboniaiglesiascarboniavinnytsiavipsinaapplurinacionalvirginanmokurennebuvirtual-userveexchangevirtualservervirtualuserveftpodhalevisakurais-into-carsnoasakuholeckodairaviterboliviajessheimmobilienvivianvivoryvixn--45br5cylvlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavmintsorocabalashovhachiojiyahikobierzycevologdanskoninjambylvolvolkswagencyouvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiechungnamdalseidfjordynathomebuiltwithdarkhangelskypecorittogojomeetoystre-slidrettozawawmemergencyahabackdropalermochizukikirarahkkeravjuwmflabsvalbardunloppadualstackomvuxn--3hcrj9chonanbuskerudynamisches-dnsarpsborgripeeweeklylotterywoodsidellogliastradingworse-thanhphohochiminhadselbuyshouseshirakolobrzegersundongthapmircloudletshiranukamishihorowowloclawekonskowolawawpdevcloudwpenginepoweredwphostedmailwpmucdnipropetrovskygearappodlasiellaknoluoktagajobojis-an-entertainerwpmudevcdnaccessojamparaglidingwritesthisblogoipodzonewroclawmcloudwsseoullensvanguardianwtcp4wtfastlylbanzaicloudappspotagereporthruherecreationinomiyakonojorpelandigickarasjohkameyamatotakadawuozuerichardlillywzmiuwajimaxn--4it797konsulatrobeepsondriobranconagareyamaizuruhrxn--4pvxs4allxn--54b7fta0ccistrondheimpertrixcdn77-secureadymadealstahaugesunderxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49citadelhichisochimkentozsdell-ogliastraderxn--5rtq34kontuminamiuonumatsunoxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264citicarrdrobakamaiorigin-stagingmxn--12co0c3b4evalleaostaobaomoriguchiharaffleentrycloudflare-ipfstcgroupaaskimitsubatamibulsan-suedtirolkuszczytnoopscbgrimstadrrxn--80aaa0cvacationsvchoyodobashichinohealth-carereforminamidaitomanaustdalxn--80adxhksveioxn--80ao21axn--80aqecdr1axn--80asehdbarclaycards3-us-west-1xn--80aswgxn--80aukraanghkeliwebpaaskoyabeagleboardxn--8dbq2axn--8ltr62konyvelohmusashimurayamassivegridxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisencowayxn--90a3academiamicable-modemoneyxn--90aeroportsinfolionetworkangerxn--90aishobaraxn--90amckinseyxn--90azhytomyrxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawaxn--asky-iraxn--aurskog-hland-jnbarclays3-us-west-2xn--avery-yuasakurastoragexn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsvelvikongsbergxn--bck1b9a5dre4civilaviationfabricafederation-webredirectmediatechnologyeongbukashiwazakiyosembokutamamuraxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptarumizusawaxn--blt-elabcienciamallamaceiobbcn-north-1xn--bmlo-graingerxn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagesquare7xn--brum-voagatrustkanazawaxn--btsfjord-9zaxn--bulsan-sdtirol-nsbarefootballooningjovikarasjoketokashikiyokawaraxn--c1avgxn--c2br7gxn--c3s14misakis-a-therapistoiaxn--cck2b3baremetalombardyn-vpndns3-website-ap-northeast-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-into-cartoonsokamitsuexn--ciqpnxn--clchc0ea0b2g2a9gcdxn--czr694bargainstantcloudfrontdoorestauranthuathienhuebinordre-landiherokuapparochernigovernmentjeldsundiscords
ays3-website-ap-southeast-1xn--czrs0trvaroyxn--czru2dxn--czrw28barrel-of-knowledgeapplinziitatebayashijonawatebizenakanojoetsumomodellinglassnillfjordiscordsezgoraxn--d1acj3barrell-of-knowledgecomputermezproxyzgorzeleccoffeedbackanagawarmiastalowa-wolayangroupars3-website-ap-southeast-2xn--d1alfaststacksevenassigdalxn--d1atrysiljanxn--d5qv7z876clanbibaiduckdnsaseboknowsitallxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koobindalxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmail-boxaxn--eckvdtc9dxn--efvn9svn-repostuff-4-salexn--efvy88haebaruericssongdalenviknaklodzkochikushinonsenasakuchinotsuchiurakawaxn--ehqz56nxn--elqq16hagakhanhhoabinhduongxn--eveni-0qa01gaxn--f6qx53axn--fct429kooris-a-nascarfanxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbcleverappsassarinuyamashinazawaxn--fiq64barsycenterprisecloudcontrolappgafanquangnamasteigenoamishirasatochigifts3-website-eu-west-1xn--fiqs8swidnicaravanylvenetogakushimotoganexn--fiqz9swidnikitagatakkomaganexn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbsswiebodzindependent-commissionxn--forlcesena-c8axn--fpcrj9c3dxn--frde-granexn--frna-woaxn--frya-hraxn--fzc2c9e2clickrisinglesjaguarxn--fzys8d69uvgmailxn--g2xx48clinicasacampinagrandebungotakadaemongolianishitosashimizunaminamiawajikintuitoyotsukaidownloadrudtvsaogoncapooguyxn--gckr3f0fastvps-serveronakanotoddenxn--gecrj9cliniquedaklakasamatsudoesntexisteingeekasserversicherungroks-theatrentin-sud-tirolxn--ggaviika-8ya47hagebostadxn--gildeskl-g0axn--givuotna-8yandexcloudxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-into-gamessinamsosnowieconomiasadojin-dslattuminamitanexn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45brj9churcharterxn--gnstigliefern-wobihirosakikamijimayfirstorfjordxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3eveneswinoujsciencexn--h2brj9c8clothingdustdatadetectrani-andria-barletta-trani-andriaxn--h3cuzk1dienbienxn--hbmer-xqaxn--hcesuolo-7ya35barsyonlinehimejiiyamanouchikujoinvilleirvikarasuyamashikemrevistathellequipmentjmaxxxjavald-aostatics3-website-sa-east-1xn--hebda8basicserversejny-2xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-k3swisstufftoread-booksnestudioxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyaotsusonoxn--io0a7is-leetrentinoaltoadigexn--j1adpohlxn--j1aefauskedsmokorsetagayaseralingenovaraxn--j1ael8basilicataniaxn--j1amhaibarakisosakitahatakamatsukawaxn--j6w193gxn--jlq480n2rgxn--jlster-byasakaiminatoyookananiimiharuxn--jrpeland-54axn--jvr189misasaguris-an-accountantsmolaquilaocais-a-linux-useranishiaritabashikaoizumizakitashiobaraxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45q11circlerkstagentsasayamaxn--koluokta-7ya57haiduongxn--kprw13dxn--kpry57dxn--kput3is-lostre-toteneis-a-llamarumorimachidaxn--krager-gyasugitlabbvieeexn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfastly-terrariuminamiiseharaxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasuokanmakiwakuratexn--kvnangen-k0axn--l-1fairwindsynology-diskstationxn--l1accentureklamborghinikkofuefukihabororosynology-dsuzakadnsaliastudynaliastrynxn--laheadju-7yatominamibosoftwarendalenugxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52basketballfinanzjaworznoticeableksvikaratsuginamikatagamilanotogawaxn--lesund-huaxn--lgbbat1ad8jejuxn--lgrd-poacctulaspeziaxn--lhppi-xqaxn--linds-pramericanexpresservegame-serverxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacn-northwe
st-1xn--lten-granvindafjordxn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesxn--mgb9awbfbsbxn--1qqw23axn--mgba3a3ejtunesuzukamogawaxn--mgba3a4f16axn--mgba3a4fra1-deloittexn--mgba7c0bbn0axn--mgbaakc7dvfsxn--mgbaam7a8haiphongonnakatsugawaxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscountry-snowplowiczeladzlgleezeu-2xn--mgbai9azgqp6jelasticbeanstalkharkovalleeaostexn--mgbayh7gparasitexn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskopervikhmelnytskyivalleedaostexn--mgbqly7c0a67fbcngroks-thisayamanobeatsaudaxn--mgbqly7cvafricargoboavistanbulsan-sudtirolxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhauspostman-echofunatoriginstances3-website-us-east-1xn--mgbx4cd0abkhaziaxn--mix082fbx-osewienxn--mix891fbxosexyxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44cnpyatigorskjervoyagexn--mkru45is-not-certifiedxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakuratanxn--mosjen-eyatsukannamihokksundxn--mot-tlavangenxn--mre-og-romsdal-qqbuservecounterstrikexn--msy-ula0hair-surveillancexn--mtta-vrjjat-k7aflakstadaokayamazonaws-cloud9guacuiababybluebiteckidsmynasushiobaracingrok-freeddnsfreebox-osascoli-picenogatabuseating-organicbcgjerdrumcprequalifymelbourneasypanelblagrarq-authgear-stagingjerstadeltaishinomakilovecollegefantasyleaguenoharauthgearappspacehosted-by-previderehabmereitattoolforgerockyombolzano-altoadigeorgeorgiauthordalandroideporteatonamidorivnebetsukubankanumazuryomitanocparmautocodebergamoarekembuchikumagayagawafflecelloisirs3-external-180reggioemiliaromagnarusawaustrheimbalsan-sudtirolivingitpagexlivornobserveregruhostingivestbyglandroverhalladeskjakamaiedge-stagingivingjemnes3-eu-west-2038xn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4dbgdty6ciscofreakamaihd-stagingriwataraindroppdalxn--nit225koryokamikawanehonbetsuwanouchikuhokuryugasakis-a-nursellsyourhomeftpiwatexn--nmesjevuemie-tcbalatinord-frontierxn--nnx388axn--nodessakurawebsozais-savedxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsicilynxn--4dbrk0cexn--o3cw4hakatanortonkotsunndalxn--o3cyx2axn--od0algardxn--od0aq3beneventodayusuharaxn--ogbpf8fldrvelvetromsohuissier-justicexn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--otu796dxn--p1acfedjeezxn--p1ais-slickharkivallee-d-aostexn--pgbs0dhlx3xn--porsgu-sta26fedorainfraclouderaxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cnsauheradyndns-at-homedepotenzamamicrosoftbankasukabedzin-brbalsfjordietgoryoshiokanravocats3-fips-us-gov-west-1xn--qcka1pmcpenzapposxn--qqqt11misconfusedxn--qxa6axn--qxamunexus-3xn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-uberleetrentinos-tirolxn--rennesy-v1axn--rhkkervju-01afedorapeoplefrakkestadyndns-webhostingujogaszxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturalxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawaraxn--rny31hakodatexn--rovu88bentleyusuitatamotorsitestinglitchernihivgubs3-website-us-west-1xn--rros-graphicsxn--rskog-uuaxn--rst-0naturbruksgymnxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawatahamaxn--s-1faitheshopwarezzoxn--s9brj9cntraniandriabarlettatraniandriaxn--sandnessjen-ogbentrendhostingliwiceu-3xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4gbriminiserverxn--skierv-utazurestaticappspaceusercontentunkongsvingerxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navigationxn--slt-elabogado
beaemcloud-fr1xn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbeppublishproxyuufcfanirasakindependent-panelomonza-brianzaporizhzhedmarkarelianceu-4xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeskidyn-ip24xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bloggerxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbestbuyshoparenagasakikuchikuseihicampinashikiminohostfoldnavyuzawaxn--stre-toten-zcbetainaboxfuselfipartindependent-reviewegroweibolognagasukeu-north-1xn--t60b56axn--tckweddingxn--tiq49xqyjelenia-goraxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbhzc66xn--trentin-sdtirol-7vbialystokkeymachineu-south-1xn--trentino-sd-tirol-c3bielawakuyachimataharanzanishiazaindielddanuorrindigenamerikawauevje-og-hornnes3-website-us-west-2xn--trentino-sdtirol-szbiella-speziaxn--trentinosd-tirol-rzbieszczadygeyachiyodaeguamfamscompute-1xn--trentinosdtirol-7vbievat-band-campaignieznoorstaplesakyotanabellunordeste-idclkarlsoyxn--trentinsd-tirol-6vbifukagawalbrzycharitydalomzaporizhzhiaxn--trentinsdtirol-nsbigv-infolkebiblegnicalvinklein-butterhcloudiscoursesalangenishigotpantheonsitexn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atventuresinstagingxn--uc0ay4axn--uist22hakonexn--uisz3gxn--unjrga-rtashkenturindalxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbturystykaneyamazoexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccertmgreaterxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbiharstadotsubetsugarulezajskiervaksdalondonetskarmoyxn--vestvgy-ixa6oxn--vg-yiabruzzombieidskogasawarackmazerbaijan-mayenbaidarmeniaxn--vgan-qoaxn--vgsy-qoa0jellybeanxn--vgu402coguchikuzenishiwakinvestmentsaveincloudyndns-at-workisboringsakershusrcfdyndns-blogsitexn--vhquvestfoldxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bihoronobeokagakikugawalesundiscoverdalondrinaplesknsalon-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1communexn--wgbl6axn--xhq521bikedaejeonbuk0xn--xkc2al3hye2axn--xkc2dl3a5ee0hakubackyardshiraois-a-greenxn--y9a3aquarelleasingxn--yer-znavois-very-badxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4it168dxn--ystre-slidre-ujbiofficialorenskoglobodoes-itcouldbeworldishangrilamdongnairkitapps-audibleasecuritytacticsxn--0trq7p7nnishiharaxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bipartsaloonishiizunazukindustriaxnbayernxz
\ No newline at end of file
+bolzano-altoadigevje-og-hornnes3-website-us-west-2bomlocustomer-ocienciabonavstackarasjoketokuyamashikokuchuobondigitaloceanspacesakurastoragextraspace-to-rentalstomakomaibarabonesakuratanishikatakazakindustriesteinkjerepbodynaliasnesoddeno-staginglobodoes-itcouldbeworfarsundiskussionsbereichateblobanazawarszawashtenawsapprunnerdpoliticaarparliamenthickarasuyamasoybookonlineboomladeskierniewiceboschristmasakilovecollegefantasyleaguedagestangebostik-serveronagasukeyword-oncillahppictetcieszynishikatsuragit-repostre-totendofinternet-dnsakurawebredirectmeiwamizawabostonakijinsekikogentlentapisa-geekaratsuginamikatagamimozaporizhzhegurinfinitigooglecode-builder-stg-buildereporthruhereclaimsakyotanabellunord-odalvdalcest-le-patron-k3salangenishikawazukamishihorobotdashgabadaddjabbotthuathienhuebouncemerckmsdscloudisrechtrafficplexus-4boutiquebecologialaichaugianglogowegroweibolognagasakikugawaltervistaikillondonetskarelianceboutireserve-onlineboyfriendoftheinternetflixn--11b4c3ditchyouriparmabozen-sudtirolondrinaplesknsalatrobeneventoeidsvollorenskogloomy-gatewaybozen-suedtirolovableprojectjeldsundivtasvuodnakamai-stagingloppennebplaceditorxn--12c1fe0bradescotaruinternationalovepoparochernihivgubamblebtimnetzjaworznotebook-fips3-fips-us-gov-east-1brandivttasvuotnakamuratajirintlon-2brasiliadboxoslodingenishimerabravendbarcelonagawakuyabukikiraragusabaerobatickets3-fips-us-gov-west-1bresciaogashimadachicappabianiceobridgestonebrindisiciliabroadwaybroke-itvedestrandixn--12cfi8ixb8lovesickarlsoybrokerevistathellebrothermesserlidlplfinancialpusercontentjmaxxxn--12co0c3b4evalleaostargets-itjomeldalucaniabrumunddaluccampobassociatesalon-1brusselsaloonishinomiyashironobryanskiervadsoccerhcloudyclusterbrynebweirbzhitomirumaintenanceclothingdustdatadetectoyouracngovtoystre-slidrettozawacnpyatigorskjakamaiedge-stagingreatercnsapporocntozsdeliverycodebergrayjayleaguesardegnarutoshimatta-varjjatranatalcodespotenzakopanecoffeedbackanagawatsonrendercommunity-prochowicecomockashiharacompanyantaishinomakimobetsulifestylefrakkestadurumisakindlegnicahcesuolohmusashimurayamaizuruhr-uni-bochuminamiechizenisshingucciminamifuranocomparemarkerryhotelsardiniacomputercomsecretrosnubarclays3-me-south-1condoshiibabymilk3conferenceconstructioniyodogawaconsuladobeio-static-accesscamdvrcampaniaconsultantranbyconsultingretakamoriokakudamatsuecontactivetrail-central-1contagematsubaracontractorstabacgiangiangryconvexecute-apictureshinordkappaviacookingrimstadynathomebuiltwithdarklangevagrarchitectestingripeeweeklylotterycooperativano-frankivskjervoyagecoprofesionalchikugodaddyn-o-saureadymadethis-a-anarchistjordalshalsenl-ams-1corsicafederationfabricable-modemoneycosenzamamidorivnecosidnsdojoburgriwataraindroppdalcouchpotatofriesarlcouncilcouponstackitagawacozoracpservernamegataitogodoesntexisteingeekashiwaracqcxn--1lqs71dyndns-at-homedepotrani-andria-barletta-trani-andriacrankyotobetsulubin-dsldyndns-at-workisboringsakershusrcfdyndns-blogsiteleaf-south-1crdyndns-freeboxosarpsborgroks-theatrentin-sud-tirolcreditcardyndns-homednsarufutsunomiyawakasaikaitakokonoecreditunioncremonasharis-a-bulls-fancrewp2cricketnedalcrimeast-kazakhstanangercrispawnextdirectraniandriabarlettatraniandriacrminamiiseharacrotonecrownipfizercrsasayamacruisesaseboknowsitallcryptonomichiharacuisinellamdongnairflowersassaris-a-candidatecuneocuritibackdropalermobarag-cloud-charitydalp1cutegirlfriendyndns-ipgwangjulvikashiwazakizunokuniminamiashigarafedoraprojectransiphdfcbankasserverrankoshigayakagefeirafembetsukubankasukabeautypedreamhosterscrapper-sitefe
rmodalenferraraferraris-a-celticsfanferreroticallynxn--2scrj9cargoboavistanbulsan-sudtiroluhanskarmoyfetsundyndns-remotewdhlx3fgroundhandlingroznyfhvalerfilegear-sg-1filminamiminowafinalfinancefinnoyfirebaseapphilipscrappingrphonefosscryptedyndns-serverdalfirenetgamerscrysecuritytacticscwestus2firenzeaburfirestonefirmdaleilaocairportranslatedyndns-webhareidsbergroks-thisayamanobearalvahkikonaikawachinaganoharamcoachampionshiphoplixn--1qqw23afishingokasellfyresdalfitjarfitnessettsurugashimamurogawafjalerfkasumigaurayasudaflesbergrueflickragerotikagoshimandalflierneflirflogintohmangoldpoint2thisamitsukefloppymntransportefloraclegovcloudappservehttpbincheonflorencefloripadualstackasuyakumoduminamioguni5floristanohatakaharunservehumourfloromskoguidefinimalopolskanittedalfltransurlflutterflowhitesnowflakeflyfncarrdiyfndyndns-wikinkobayashimofusadojin-the-bandairlinemurorangecloudplatformshakotanpachihayaakasakawaharacingrondarfoolfor-ourfor-somedusajserveircasacampinagrandebulsan-suedtirolukowesleyfor-theaterfordebianforexrotheworkpccwhminamisanrikubetsupersaleksvikaszubytemarketingvollforgotdnserveminecraftrapanikkoelnforli-cesena-forlicesenaforlikescandypopensocialforsalesforceforsandasuoloisirservemp3fortalfosneservep2photographysiofotravelersinsurancefoxn--30rr7yfozfr-1fr-par-1fr-par-2franalytics-gatewayfredrikstadyndns-worksauheradyndns-mailfreedesktopazimuthaibinhphuocprapidyndns1freemyiphostyhostinguitarservepicservequakefreesitefreetlservesarcasmilefreightravinhlonganfrenchkisshikirovogradoyfreseniuservicebuskerudynnsaveincloudyndns-office-on-the-webflowtest-iservebloginlinefriuli-v-giuliarafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfrogansevastopolitiendafrognfrolandynservebbsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriafrom-akamaiorigin-stagingujaratmetacentruminamitanefrom-alfrom-arfrom-azureedgecompute-1from-caltanissettainaircraftraeumtgeradealstahaugesunderfrom-cockpitrdynuniversitysvardofrom-ctrentin-sudtirolfrom-dcasertaipeigersundnparsaltdaluroyfrom-decafjsevenassieradzfrom-flatangerfrom-gap-southeast-3from-higashiagatsumagoianiafrom-iafrom-idynv6from-ilfrom-in-vpncashorokanaiefrom-ksewhoswholidayfrom-kyfrom-langsonyatomigrationfrom-mangyshlakamaized-stagingujohanamakinoharafrom-mdynvpnplusavonarviikamisatokonamerikawauefrom-meetrentin-sued-tirolfrom-mihamadanangoguchilloutsystemscloudscalebookinghosteurodirfrom-mnfrom-modellingulenfrom-msexyfrom-mtnfrom-ncasinordeste-idclkarpaczest-a-la-maisondre-landray-dnsaludrayddns-ipartintuitjxn--1ck2e1barclaycards3-globalatinabelementorayomitanobservableusercontentateyamauth-fipstmninomiyakonojosoyrovnoticeableitungsenirasakibxos3-ca-central-180reggio-emilia-romagnaroyolasitebinordlandeus-canvasitebizenakanojogaszkolamericanfamilyds3-ap-south-12hparallelimodxboxeroxjavald-aostaticsxmitakeharaugustow-corp-staticblitzgorzeleccocotteatonamifunebetsuikirkenes3-ap-northeast-2ixn--0trq7p7nninjambylive-oninohekinanporovigonnakasatsunaibigawaukraanghkembuchikumagayagawakkanaibetsubame-central-123websitebuildersvp4from-ndyroyrvikingrongrossetouchijiwadedyn-berlincolnfrom-nefrom-nhlfanfrom-njsheezyfrom-nminamiuonumatsunofrom-nvalled-aostargithubusercontentrentin-suedtirolfrom-nysagamiharafrom-ohdancefrom-okegawafrom-orfrom-palmasfjordenfrom-pratohnoshookuwanakanotoddenfrom-ris-a-chefashionstorebaseljordyndns-picsbssaudafrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--32vp30hachinoheavyfrom-utsir
acusagemakerfrom-val-daostavalleyfrom-vtrentino-a-adigefrom-wafrom-wiardwebspaceconfigunmarnardalfrom-wvalledaostarnobrzeguovdageaidnunjargausdalfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingushikamifuranorth-kazakhstanfujiiderafujikawaguchikonefujiminokamoenairtelebitbucketrzynh-servebeero-stageiseiroutingthecloudfujinomiyadappnodearthainguyenfujiokazakiryuohkurafujisatoshoeshellfujisawafujishiroishidakabiratoridediboxafujitsuruokakamigaharafujiyoshidatsunanjoetsumidaklakasamatsudogadobeioruntimedicinakaiwanairforcentralus-1fukayabeagleboardfukuchiyamadattorelayfukudomigawafukuis-a-conservativefsnoasakakinokiafukumitsubishigakisarazure-apigeefukuokakegawafukuroishikariwakunigamiharuovatlassian-dev-builderfukusakishiwadattoweberlevagangaviikanonjis-a-cpanelfukuyamagatakahashimamakisofukushimaniwamannordre-landfunabashiriuchinadavvenjargamvikatowicefunagatakahatakaishimokawafunahashikamiamakusatsumasendaisenergyeonggiizefundfunkfeuerfunnelshimonitayanagitapphutholdingsmall-websozais-a-cubicle-slaveroykenfuoiskujukuriyamaoris-a-democratrentino-aadigefuosskodjeezfurubirafurudonordreisa-hockeynutwentertainmentrentino-alto-adigefurukawaiishoppingxn--3bst00minamiyamashirokawanabeepsondriobranconagarahkkeravjunusualpersonfusoctrangyeongnamdinhs-heilbronnoysundfussaikisosakitahatakamatsukawafutabayamaguchinomihachimanagementrentino-altoadigefutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinairtrafficmanagerfuturecmshimonosekikawafuturehosting-clusterfuturemailingzfvghakuis-a-doctoruncontainershimotsukehakusandnessjoenhaldenhalfmoonscaleforcehalsaitamatsukuris-a-financialadvisor-aurdalham-radio-ophuyenhamburghammarfeastasiahamurakamigoris-a-fullstackaufentigerhanamigawahanawahandahandcraftedugit-pages-researchedmarketplacehangglidinghangoutrentino-s-tirolhannannestadhannoshiroomghanoiphxn--3ds443ghanyuzenhappoumuginowaniihamatamakawajimap-southeast-4hasamazoncognitoigawahasaminami-alpshimotsumahashbanghasudahasura-appigboatshinichinanhasvikautokeinotionhatenablogspotrentino-stirolhatenadiaryhatinhachiojiyachiyodazaifudaigojomedio-campidano-mediocampidanomediohatogayachtshinjournalistorfjordhatoyamazakitakatakanezawahatsukaichikawamisatohokkaidontexistmein-iservschulegalleryhattfjelldalhayashimamotobusells-for-lesshinjukuleuvenicehazuminobushibuyahabacninhbinhdinhktrentino-sud-tirolhelpgfoggiahelsinkitakyushunantankazohemneshinkamigotoyokawahemsedalhepforgeblockshinshinotsupplyhetemlbfanheyflowienhigashichichibuzzhigashihiroshimanehigashiizumozakitamihokksundhigashikagawahigashikagurasoedahigashikawakitaaikitamotosumy-routerhigashikurumegurownproviderhigashimatsushimarriottrentino-sudtirolhigashimatsuyamakitaakitadaitomanaustdalhigashimurayamamotorcycleshinshirohigashinarusells-for-uzhhorodhigashinehigashiomitamamurausukitanakagusukumodshintokushimahigashiosakasayamanakakogawahigashishirakawamatakaokalmykiahigashisumiyoshikawaminamiaikitashiobarahigashitsunospamproxyhigashiurawa-mazowszexposeducatorprojectrentino-sued-tirolhigashiyamatokoriyamanashijonawatehigashiyodogawahigashiyoshinogaris-a-geekazunotogawahippythonanywherealminanohiraizumisatokaizukaluganskddiamondshintomikasaharahirakatashinagawahiranais-a-goodyearhirarahiratsukagawahirayahikobeatshinyoshitomiokamisunagawahitachiomiyakehitachiotaketakarazukamaishimodatehitradinghjartdalhjelmelandholyhomegoodshiojirishirifujiedahomeipikehomelinuxn--3e0b707ehomesecuritymacaparecidahomesecuritypcateringebungotakadaptableclerc66116-balsfjordeltaiwanumatajimidsundeportebinatsukigatakahamalvik8s3-ap-northeast-3utilities-12charstadao
kagakirunocelotenkawadlugolekadena4ufcfanimsiteasypanelblagrigentobishimafeloansncf-ipfstdlibestadultatarantoyonakagyokutoyonezawapartments3-ap-northeast-123webseiteckidsmynascloudfrontierimo-siemenscaledekaascolipicenoboribetsubsc-paywhirlimitedds3-accesspoint-fips3-ap-east-123miwebaccelastx4432-b-datacenterprisesakihokuizumoarekepnord-aurdalipaynow-dns-dynamic-dnsabruzzombieidskogasawarackmazerbaijan-mayenbaidarmeniajureggio-calabriaknoluoktagajoboji234lima-citychyattorneyagawafflecellclstagehirnayorobninsk123kotisivultrobjectselinogradimo-i-ranamizuhobby-siteaches-yogano-ip-ddnsgeekgalaxyzgierzgorakrehamnfshostrowwlkpnftstorage164-balsan-suedtirolillyokozeastus2000123paginawebadorsiteshikagamiishibechambagricoharugbydgoszczecin-addrammenuorogerscbgdyniaktyubinskaunicommuneustarostwodzislawdev-myqnapcloudflarecn-northwest-123sitewebcamauction-acornikonantotalimanowarudakunexus-2038homesenseeringhomeskleppilottottoris-a-greenhomeunixn--3hcrj9catfoodraydnsalvadorhondahonjyoitakasagonohejis-a-guruzshioyaltakkolobrzegersundongthapmircloudnshome-webservercelliguriahornindalhorsells-itrentino-suedtirolhorteneiheijis-a-hard-workershirahamatonbetsupportrentinoa-adigehospitalhotelwithflightshirakomaganehotmailhoyangerhoylandetakasakitaurahrsnillfjordhungyenhurdalhurumajis-a-hunterhyllestadhyogoris-a-knightpointtokashikitchenhypernodessaitokamachippubetsubetsugaruhyugawarahyundaiwafuneis-uberleetrentinoaltoadigeis-very-badis-very-evillasalleirvikharkovallee-d-aosteis-very-goodis-very-niceis-very-sweetpepperugiais-with-thebandoomdnsiskinkyowariasahikawaisk01isk02jellybeanjenv-arubahcavuotnagahamaroygardenflfanjeonnamsosnowiecaxiaskoyabenoopssejny-1jetztrentinos-tiroljevnakerjewelryjlljls-sto1jls-sto2jls-sto365jmpioneerjnjcloud-ver-jpcatholicurus-3joyentrentinostiroljoyokaichibahccavuotnagaivuotnagaokakyotambabybluebitemasekd1jozis-a-llamashikiwakuratejpmorgangwonjpnjprshoujis-a-musiciankoseis-a-painterhostsolutionshiraokamitsuekosheroykoshimizumakis-a-patsfankoshugheshwiiheyahoooshikamagayaitakashimarshallstatebankhplaystation-cloudsitekosugekotohiradomainsurealtypo3serverkotourakouhokumakogenkounosunnydaykouyamatlabcn-north-1kouzushimatrixn--41akozagawakozakis-a-personaltrainerkozowilliamhillkppspdnsigdalkrasnikahokutokyotangopocznore-og-uvdalkrasnodarkredumbrellapykrelliankristiansandcatsiiitesilklabudhabikinokawabajddarqhachirogatakanabeardubaioiraseekatsushikabedzin-brb-hostingkristiansundkrodsheradkrokstadelvaldaostavangerkropyvnytskyis-a-photographerokuappinkfh-muensterkrymisasaguris-a-playershiftrentinoaadigekumamotoyamatsumaebashimogosenkumanowtvalleedaostekumatorinokumejimatsumotofukekumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-republicanonoichinosekigaharakunitachiaraisaijorpelandkunitomigusukukis-a-rockstarachowicekunneppubtlsimple-urlkuokgroupiwatekurgankurobeebyteappleykurogiminamiawajikis-a-socialistockholmestrandkuroisodegaurakuromatsunais-a-soxfankuronkurotakikawasakis-a-studentalkushirogawakustanais-a-teacherkassyncloudkusupabaseminekutchanelkutnokuzumakis-a-techietis-a-liberalkvafjordkvalsundkvamfamplifyappchizip6kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspectrumisawamjondalenmonza-brianzapposirdalmonza-e-della-brianzaptonsbergmonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenapolicemoriyamatsushigemoriyoshiminamibosoftwarendalenugmormonstermoroyamatsuuramortgagemoscowinbarrel-of-knowledgekey-stagingjerstadigickaracolognemrstudio-prodoyonagoyauthgearapps-1and1moseushimoichikuzenmosjoenmoskenesiskomakis-a-therapistoiamosslupskmpspbaremetalpha-myqna
pcloudaccess3-sa-east-1mosviknx-serversicherungmotegirlymoviemovimientoolslzmtrainingmuikamiokameokameyamatotakadamukodairamunakatanemuosattemupixolinodeusercontentrentinosud-tirolmurmanskomatsushimasudamurotorcraftrentinosudtirolmusashinodesakatakayamatsuzakis-an-accountantshiratakahagiangmuseumisconfusedmusicanthoboleslawiecommerce-shopitsitevaksdalmutsuzawamutualmy-vigormy-wanggoupilemyactivedirectorymyaddrangedalmyamazeplaymyasustor-elvdalmycloudnasushiobaramydattolocalcertrentinosued-tirolmydbservermyddnskingmydissentrentinosuedtirolmydnsmolaquilarvikomforbargainstitutemp-dnswatches3-us-east-2mydobissmarterthanyoumydrobofageorgeorgiamydsmushcdn77-securecipescaracalculatorskenmyeffectrentinsud-tirolmyfastly-edgemyfirewalledreplittlestargardmyforumishimatsusakahoginozawaonsennanmokurennebuyshousesimplesitemyfritzmyftpaccessojampanasonichernovtsydneymyhome-servermyjinomykolaivencloud66mymailermymediapchiryukyuragifuchungbukharanzanishinoomotegoismailillehammerfeste-ipartsamegawamynetnamegawamyokohamamatsudamypepizzamypetsokananiimilanoticiassurfastly-terrariuminamiizukaminoyamaxunison-servicesaxomyphotoshibalena-devicesokndalmypiemontemypsxn--42c2d9amyrdbxn--45br5cylmysecuritycamerakermyshopblocksolardalmyshopifymyspreadshopselectrentinsudtirolmytabitordermythic-beastsolundbeckommunalforbundmytis-a-bloggermytuleap-partnersomamyvnchitachinakagawassamukawatarittogitsuldalutskartuzymywirebungoonoplurinacionalpmnpodhalepodlasiellakdnepropetrovskanlandpodzonepohlpoivronpokerpokrovskomonotteroypolkowicepoltavalle-aostavernpolyspacepomorzeszowindowsserveftplatter-appkommuneponpesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-an-actresshishikuis-a-libertarianpordenonepornporsangerporsangugeporsgrunnanpoznanpraxihuanprdprereleaseoullensakerprgmrprimetelprincipenzaprivatelinkyard-cloudletsomnarvikomorotsukaminokawanishiaizubangeprivatizehealthinsuranceprogressivegarsheiyufueliv-dnsoowinepromoliserniapropertysnesopotrentinsued-tirolprotectionprotonetrentinsuedtirolprudentialpruszkowinnersor-odalprvcyprzeworskogpunyukis-an-anarchistoloseyouripinokofuefukihabororoshisogndalpupulawypussycatanzarowiosor-varangerpvhackerpvtrentoyosatoyookaneyamazoepwchitosetogliattipsamnangerpzqotoyohashimotoyakokamimineqponiatowadaqslgbtrevisognequalifioapplatterpl-wawsappspacehostedpicardquangngais-an-artistordalquangninhthuanquangtritonoshonais-an-engineeringquickconnectroandindependent-inquest-a-la-masionquicksytesorfoldquipelementsorocabalestrandabergamochizukijobservablehqldquizzesorreisahayakawakamiichinomiyagithubpreviewskrakowitdkontoguraswinoujscienceswissphinxn--45brj9chonanbunkyonanaoshimaringatlanbibaiduckdnsamparachutinglugsjcbnpparibashkiriasyno-dspjelkavikongsbergsynology-diskstationsynology-dspockongsvingertushungrytuvalle-daostaobaolbia-tempio-olbiatempioolbialowiezaganquangnamasteigenoamishirasatochigiftsrhtrogstadtuxfamilytuyenquangbinhthuantwmailvegasrlvelvetromsohuissier-justiceventurestaurantrustkanieruchomoscientistoripresspydebergvestfoldvestnesrvaomoriguchiharaffleentrycloudflare-ipfsortlandvestre-slidrecreationvestre-totennishiawakuravestvagoyvevelstadvfstreakusercontentroitskoninfernovecorealtorvibo-valentiavibovalentiavideovinhphuchoshichikashukudoyamakeupartysfjordrivelandrobakamaihd-stagingmbhartinnishinoshimattelemarkhangelskaruizawavinnicapitalonevinnytsiavipsinaapplockervirginankokubunjis-byklecznagatorokunohealth-carereformincommbankhakassiavirtual-uservecounterstrikevirtualservervirtualuserveexchangevisakuholeckobierzyceviterboliviajessheimperiavivianvivoryvixn--45q11chowdervlaan
derennesoyvladikavkazimierz-dolnyvladimirvlogisticstreamlitapplcube-serversusakis-an-actorvmitourismartlabelingvolvologdanskontumintshowavolyngdalvoorlopervossevangenvotevotingvotoyotap-southeast-5vps-hostreaklinkstrippervusercontentrvaporcloudwiwatsukiyonotairesindevicenzaokinawashirosatochiokinoshimagazinewixsitewixstudio-fipstrynwjgorawkzwloclawekonyvelolipopmcdirwmcloudwmelhustudynamisches-dnsorumisugitomobegetmyipifony-2wmflabstuff-4-salewoodsidell-ogliastrapiapplinzis-certifiedworldworse-thanhphohochiminhadanorthflankatsuyamassa-carrara-massacarraramassabunzenwowithgoogleapiszwpdevcloudwpenginepoweredwphostedmailwpmucdn77-sslingwpmudevelopmentrysiljanewaywpsquaredwritesthisblogoiplumbingotpantheonsitewroclawsglobalacceleratorahimeshimakanegasakievennodebalancernwtcp4wtfastlylbarefootballooningjerdrumemergencyonabarumemorialivornobservereitatsunofficialolitapunkapsienamsskoganeindependent-panelombardiademfakefurniturealestatefarmerseinemrnotebooks-prodeomniwebthings3-object-lambdauthgear-stagingivestbyglandroverhallair-traffic-controllagdenesnaaseinet-freaks3-deprecatedgcagliarissadistgstagempresashibetsukuiitatebayashikaoirmembers3-eu-central-1kapp-ionosegawafaicloudineat-urlive-websitehimejibmdevinapps3-ap-southeast-1337wuozuerichardlillesandefjordwwwithyoutuberspacewzmiuwajimaxn--4it797koobindalxn--4pvxs4allxn--54b7fta0cchromediatechnologyeongbukarumaifmemsetkmaxxn--1ctwolominamatarpitksatmalluxenishiokoppegardrrxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49chungnamdalseidfjordtvsangotsukitahiroshimarcherkasykkylvenneslaskerrypropertiesanjotelulublindesnesannanishitosashimizunaminamidaitolgaularavellinodeobjectsannoheliohostrodawaraxn--5rtq34kooris-a-nascarfanxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264churchaselfipirangallupsunappgafanishiwakinuyamashinazawaxn--80aaa0cvacationstufftoread-booksnesoundcastreak-linkomvuxn--3pxu8khmelnitskiyamassivegridxn--80adxhksurnadalxn--80ao21axn--80aqecdr1axn--80asehdbarrell-of-knowledgesuite-stagingjesdalombardyn-vpndns3-us-gov-east-1xn--80aswgxn--80audnedalnxn--8dbq2axn--8ltr62kopervikhmelnytskyivalleeaostexn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisencoreapiacenzachpomorskiengiangxn--90a3academiamibubbleappspotagerxn--90aeroportsinfolkebibleasingrok-freeddnsfreebox-osascoli-picenogatachikawakayamadridvagsoyerxn--90aishobaraoxn--90amckinseyxn--90azhytomyradweblikes-piedmontuckerxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byameloyxn--asky-iraxn--aurskog-hland-jnbarsycenterprisecloudbeesusercontentattoolforgerockyonagunicloudiscordsays3-us-gov-west-1xn--avery-yuasakuragawaxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbarsyonlinequipmentaveusercontentawktoyonomurauthordalandroidienbienishiazaiiyamanouchikujolsterehabmereisenishigotembaixadavvesiidaknongivingjemnes3-eu-north-1xn--bck1b9a5dre4ciprianiigatairaumalatvuopmicrosoftbankasaokamikoaniikappudopaaskvollocaltonetlifyinvestmentsanokashibatakatsukiyosembokutamakiyosunndaluxuryxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2hosted-by-previderxn--bjarky-fyanagawaxn--bjddar-ptarumizusawaxn--blt-elabkhaziamallamaceiobbcircleaningmodelscapetownnews-stagingmxn--1lqs03nissandoyxn--bmlo-grafana-developerauniterois-coolblogdnshisuifuettertdasnetzxn--bod-2naturalxn--bozen-sdtirol-2obihirosakikamijimayfirstorjdevcloudjiffyxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagespeedmobilizeropslattumbriaxn--brum-voagatulaspeziaxn--btsfjord
-9zaxn--bulsan-sdtirol-nsbasicserver-on-webpaaskimitsubatamicrolightingjovikaragandautoscanaryggeemrappui-productions3-eu-west-1xn--c1avgxn--c2br7gxn--c3s14mitoyoakexn--cck2b3basilicataniavocats3-eu-west-2xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-foundationxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-storagencymrulezajskiptveterinaireadthedocs-hostedogawarabikomaezakishimabarakawagoexn--czr694basketballfinanzlgkpmglassessments3-us-west-1xn--czrs0t0xn--czru2dxn--d1acj3batsfjordiscordsezpisdnipropetrovskygearapparasiteu-2xn--d1alfastvps-serverisignxn--d1atunesquaresinstagingxn--d5qv7z876ciscofreakadns-cloudflareglobalashovhachijoinvilleirfjorduponthewifidelitypeformesswithdnsantamariakexn--davvenjrga-y4axn--djrs72d6uyxn--djty4koryokamikawanehonbetsuwanouchikuhokuryugasakis-a-nursellsyourhomeftpinbrowsersafetymarketshiraois-a-landscaperspectakasugais-a-lawyerxn--dnna-graingerxn--drbak-wuaxn--dyry-iraxn--e1a4cistrondheimeteorappassenger-associationissayokoshibahikariyalibabacloudcsantoandrecifedexperts-comptablesanukinzais-a-bruinsfanissedalvivanovoldaxn--eckvdtc9dxn--efvn9surveysowaxn--efvy88hadselbuzentsujiiexn--ehqz56nxn--elqq16haebaruericssongdalenviknakatombetsumitakagildeskaliszxn--eveni-0qa01gaxn--f6qx53axn--fct429kosaigawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbcitadelhichisochimkentmpatriaxn--fiq64bauhauspostman-echofunatoriginstances3-us-west-2xn--fiqs8susonoxn--fiqz9suzakarpattiaaxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbentleyoriikarasjohkamikitayamatsurindependent-review-credentialless-staticblitzw-staticblitzxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grajewolterskluwerxn--frna-woaxn--frya-hraxn--fzc2c9e2citicaravanylvenetogakushimotoganexn--fzys8d69uvgmailxn--g2xx48civilaviationionjukujitawaravennaharimalborkdalxn--gckr3f0fauskedsmokorsetagayaseralingenovaraxn--gecrj9clancasterxn--ggaviika-8ya47hagakhanhhoabinhduongxn--gildeskl-g0axn--givuotna-8yanaizuxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-gonexn--gmqw5axn--gnstigbestellen-zvbentrendhostingleezeu-3xn--gnstigliefern-wobiraxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3evenesuzukanazawaxn--h2brj9c8cldmail-boxfuseljeducationporterxn--h3cuzk1dielddanuorris-into-animein-vigorlicexn--hbmer-xqaxn--hcesuolo-7ya35beppublic-inquiryoshiokanumazuryurihonjouwwebhoptokigawavoues3-eu-west-3xn--hebda8beskidyn-ip24xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-fleeklogesquare7xn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyandexcloudxn--io0a7is-into-carshitaramaxn--j1adpdnsupdaterxn--j1aefbsbxn--2m4a15exn--j1ael8bestbuyshoparenagareyamagentositenrikuzentakataharaholtalengerdalwaysdatabaseballangenkainanaejrietiengiangheannakadomarineen-rootaribeiraogakicks-assnasaarlandiscountry-snowplowiczeladzxn--j1amhagebostadxn--j6w193gxn--jlq480n2rgxn--jlster-byaotsurgeryxn--jrpeland-54axn--jvr189mittwaldserverxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--4dbgdty6choyodobashichinohealthcareersamsclubartowest1-usamsungminakamichikaiseiyoichipsandvikcoromantovalle-d-aostakinouexn--koluokta-7ya57haibarakitakamiizumisanofidonnakaniikawatanaguraxn--kprw13dxn--kpry57dxn--kput3is-into-cartoonshizukuishimojis-a-linux-useranishiaritabashikshacknetlibp2pimientaketomisatourshiranukamitondabayashiogamagoriziaxn--krager-gyasakaiminatoyotomiyazakis-into-gamessinaklodzkochikushinonsenasakuchinotsuchiurakawaxn--kranghke-b0axn--
krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfirmalselveruminisitexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugitlabbvieeexn--kvnangen-k0axn--l-1fairwindsuzukis-an-entertainerxn--l1accentureklamborghinikolaeventsvalbardunloppadoval-d-aosta-valleyxn--laheadju-7yasuokannamimatakatoris-leetrentinoalto-adigexn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52bhzc01xn--lesund-huaxn--lgbbat1ad8jejuxn--lgrd-poacctfcloudflareanycastcgroupowiat-band-campaignoredstonedre-eikerxn--lhppi-xqaxn--linds-pramericanexpresservegame-serverxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liaclerkstagentsaobernardovre-eikerxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesvchoseikarugalsacexn--mgb9awbfbx-oschokokekscholarshipschoolbusinessebytomaridagawarmiastapleschoolsztynsetranoyxn--mgba3a3ejtunkonsulatinowruzhgorodxn--mgba3a4f16axn--mgba3a4fra1-dellogliastraderxn--mgba7c0bbn0axn--mgbaam7a8haiduongxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00bialystokkeymachineu-4xn--mgbai9azgqp6jelasticbeanstalkhersonlanxesshizuokamogawaxn--mgbayh7gparaglidingxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexperimentsveioxn--mgbpl2fhskypecoris-localhostcertificationxn--mgbqly7c0a67fbclever-clouderavpagexn--mgbqly7cvafricapooguyxn--mgbt3dhdxn--mgbtf8fldrvareservdxn--mgbtx2bielawalbrzycharternopilawalesundiscourses3-website-ap-northeast-1xn--mgbx4cd0abogadobeaemcloud-ip-dynamica-west-1xn--mix082fbxoschulplattforminamimakis-a-catererxn--mix891fedjeepharmacienschulserverxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44cleverappsaogoncanva-appsaotomelbournexn--mkru45is-lostrolekamakurazakiwielunnerxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-not-axn--mosjen-eyatsukanoyaizuwakamatsubushikusakadogawaxn--mot-tlavangenxn--mre-og-romsdal-qqbuservebolturindalxn--msy-ula0haiphongolffanshimosuwalkis-a-designerxn--mtta-vrjjat-k7aflakstadotsurugimbiella-speziaxarnetbankanzakiyosatokorozawaustevollpagest-mon-blogueurovision-ranchernigovernmentdllivingitpagemprendeatnuh-ohtawaramotoineppueblockbusterniizaustrheimdbambinagisobetsucks3-ap-southeast-2xn--muost-0qaxn--mxtq1miuraxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4dbrk0cexn--nit225kosakaerodromegalloabatobamaceratabusebastopoleangaviikafjordxn--nmesjevuemie-tcbalsan-sudtirolkuszczytnord-fron-riopretodayxn--nnx388axn--nodeloittexn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsicilyxn--o3cw4hair-surveillancexn--o3cyx2axn--od0algardxn--od0aq3bielskoczoweddinglitcheap-south-2xn--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--otu796dxn--p1acfolksvelvikonskowolayangroupippugliaxn--p1ais-not-certifiedxn--pgbs0dhakatanortonkotsumomodenakatsugawaxn--porsgu-sta26fedorainfracloudfunctionschwarzgwesteuropencraftransfer-webappharmacyou2-localplayerxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4clickrisinglesjaguarvodkagaminombrendlyngenebakkeshibukawakeliwebhostingouv0xn--qcka1pmcprequalifymeinforumzxn--qqqt11miyazure-mobilevangerxn--qxa6axn--qxamiyotamanoxn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-savedxn--rennesy-v1axn--rhkkervju-01afedorapeopleikangerxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturbruksgymnxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawaraxn--rny31hakodatexn--rovu88bieszczadygeyachimataijinderoyusuharazurefdietateshinanomachintaifun-dnsaliases121xn--rros-granvindafjordxn--rskog-uuaxn
--rst-0navigationxn--rsta-framercanvasvn-repospeedpartnerxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawatahamaxn--s-1faitheshopwarezzoxn--s9brj9clientoyotsukaidownloadurbanamexnetfylkesbiblackbaudcdn-edgestackhero-networkinggroupperxn--sandnessjen-ogbizxn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphicswidnicaobangxn--skierv-utazurecontainerimamateramombetsupplieswidnikitagatamayukuhashimokitayamaxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navoizumizakis-slickharkivallee-aosteroyxn--slt-elabievathletajimabaria-vungtaudiopsys3-website-ap-southeast-1xn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbifukagawalmartaxiijimarugame-hostrowieconomiasagaeroclubmedecin-berlindasdaeguambulancechireadmyblogsytecnologiazurestaticappspaceusercontentproxy9guacuiababia-goraclecloudappschaefflereggiocalabriaurland-4-salernooreggioemiliaromagnarusawaurskog-holandinggff5xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbigv-infolldalomoldegreeu-central-2xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bookkeepermashikexn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbiharvanedgeappengineu-south-1xn--stre-toten-zcbihoronobeokayamagasakikuchikuseihicampinashikiminohostfoldiscoverbaniazurewebsitests3-external-1xn--t60b56axn--tckwebview-assetswiebodzindependent-commissionxn--tiq49xqyjelenia-goraxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbikedaejeonbuk0emmafann-arborlandd-dnsfor-better-thanhhoarairkitapps-audiblebesbyencowayokosukanraetnaamesjevuemielnogiehtavuoatnabudejjuniper2-ddnss3-123minsidaarborteamsterdamnserverseating-organicbcg123homepagexl-o-g-i-navyokote123hjemmesidealerdalaheadjuegoshikibichuo0o0g0xn--trentin-sdtirol-7vbiomutazas3-website-ap-southeast-2xn--trentino-sd-tirol-c3birkenesoddtangentapps3-website-eu-west-1xn--trentino-sdtirol-szbittermezproxyusuitatamotors3-website-sa-east-1xn--trentinosd-tirol-rzbjarkoyuullensvanguardisharparisor-fronishiharaxn--trentinosdtirol-7vbjerkreimmobilieniwaizumiotsukumiyamazonaws-cloud9xn--trentinsd-tirol-6vbjugnieznorddalomzaporizhzhiaxn--trentinsdtirol-nsblackfridaynightayninhaccalvinklein-butterepairbusanagochigasakindigenakayamarumorimachidaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvarggatromsakegawaxn--uc0ay4axn--uist22hakonexn--uisz3gxn--unjrga-rtashkenturystykanmakiyokawaraxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtuscanyxn--valle-d-aoste-ehboehringerikerxn--valleaoste-e7axn--valledaoste-ebbvaapstempurlxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbloombergentingliwiceu-south-2xn--vestvgy-ixa6oxn--vg-yiablushangrilaakesvuemieleccevervaultgoryuzawaxn--vgan-qoaxn--vgsy-qoa0j0xn--vgu402clinicarbonia-iglesias-carboniaiglesiascarboniaxn--vhquvaroyxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bmoattachments3-website-us-east-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cliniquenoharaxn--wgbl6axn--xhq521bms3-website-us-gov-west-1xn--xkc2al3hye2axn--xkc2dl3a5ee0hakubaclieu-1xn--y9a3aquarelleborkangerxn--yer-znavuotnarashinoharaxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4gbriminiserverxn--ystre-slidre-ujbmwcloudnonproddaemongolianishiizunazukindustriaxn--zbx025dxn--zf0avxn--4it168dxn--zfr164bnrweatherchannelsdvrdns3-website-us-west-1xnbayernxz
\ No newline at end of file
diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go
index 56069d04296968dd4915b4bf5485f445faae0c5f..047cb30eb151d14202724f626b0629d173a65088 100644
--- a/vendor/golang.org/x/net/publicsuffix/list.go
+++ b/vendor/golang.org/x/net/publicsuffix/list.go
@@ -77,7 +77,7 @@ func (list) String() string {
 // privately managed domain (and in practice, not a top level domain) or an
 // unmanaged top level domain (and not explicitly mentioned in the
 // publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN
-// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and
+// domains, "foo.dyndns.org" is a private domain and
 // "cromulent" is an unmanaged top level domain.
 //
 // Use cases for distinguishing ICANN domains like "foo.com" from private
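A minimal sketch of how the ICANN/private distinction documented above surfaces through the package's exported API; the exact results track the embedded list revision, which the table.go change below advances to the 2025-03-18 snapshot.

	package main

	import (
		"fmt"

		"golang.org/x/net/publicsuffix"
	)

	func main() {
		// "org" sits in the ICANN section of the list, so icann is true.
		ps, icann := publicsuffix.PublicSuffix("foo.org")
		fmt.Println(ps, icann) // org true

		// "dyndns.org" is privately managed, so icann is false.
		ps, icann = publicsuffix.PublicSuffix("foo.dyndns.org")
		fmt.Println(ps, icann) // dyndns.org false

		// EffectiveTLDPlusOne returns the registrable domain.
		etld1, _ := publicsuffix.EffectiveTLDPlusOne("www.example.co.uk")
		fmt.Println(etld1) // example.co.uk
	}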
diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go
index 78d400fa6533a7907f188c015c1de5968f4ea6e4..0fadf9527f7ae8d3c05b6d1a2fa00b4278a5d482 100644
--- a/vendor/golang.org/x/net/publicsuffix/table.go
+++ b/vendor/golang.org/x/net/publicsuffix/table.go
@@ -4,7 +4,7 @@ package publicsuffix
 
 import _ "embed"
 
-const version = "publicsuffix.org's public_suffix_list.dat, git revision 63cbc63d470d7b52c35266aa96c4c98c96ec499c (2023-08-03T10:01:25Z)"
+const version = "publicsuffix.org's public_suffix_list.dat, git revision 2c960dac3d39ba521eb5db9da192968f5be0aded (2025-03-18T07:22:13Z)"
 
 const (
 	nodesBits           = 40
@@ -26,7 +26,7 @@ const (
 )
 
 // numTLD is the number of top level domains.
-const numTLD = 1474
+const numTLD = 1454
 
 // text is the combined text of all labels.
 //
@@ -63,8 +63,8 @@ var nodes uint40String
 //go:embed data/children
 var children uint32String
 
-// max children 743 (capacity 1023)
-// max text offset 30876 (capacity 65535)
+// max children 870 (capacity 1023)
+// max text offset 31785 (capacity 65535)
 // max text length 31 (capacity 63)
-// max hi 9322 (capacity 16383)
-// max lo 9317 (capacity 16383)
+// max hi 10100 (capacity 16383)
+// max lo 10095 (capacity 16383)
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
index ac76165cebb0af89805271193ad465fb316e0a2a..3448d20395cea816bd5b253f4b0a4ef4c23caef5 100644
--- a/vendor/golang.org/x/net/websocket/websocket.go
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -6,9 +6,10 @@
 // as specified in RFC 6455.
 //
-// This package currently lacks some features found in an alternative
-// and more actively maintained WebSocket package:
+// This package currently lacks some features found in alternative
+// and more actively maintained WebSocket packages:
 //
-//	https://pkg.go.dev/github.com/coder/websocket
+//   - [github.com/gorilla/websocket]
+//   - [github.com/coder/websocket]
 package websocket // import "golang.org/x/net/websocket"
 
 import (
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index 74f052aa9fa8a6e4d3542f6338b7bb96c3f842bf..eacdd7fd9336e1773273b39fb59db87d20b0a56d 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) {
 	if tf.refreshToken != tk.RefreshToken {
 		tf.refreshToken = tk.RefreshToken
 	}
-	return tk, err
+	return tk, nil
 }
 
 // reuseTokenSource is a TokenSource that holds a single token in memory
@@ -356,11 +356,15 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
 	if src == nil {
 		return internal.ContextClient(ctx)
 	}
+	cc := internal.ContextClient(ctx)
 	return &http.Client{
 		Transport: &Transport{
-			Base:   internal.ContextClient(ctx).Transport,
+			Base:   cc.Transport,
 			Source: ReuseTokenSource(nil, src),
 		},
+		CheckRedirect: cc.CheckRedirect,
+		Jar:           cc.Jar,
+		Timeout:       cc.Timeout,
 	}
 }
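This hunk is behavioral, not cosmetic: NewClient now copies CheckRedirect, Jar, and Timeout from the *http.Client found in the context, where previously only its Transport survived the wrapping. A sketch of the observable difference, with a static token standing in for a real source:

	package main

	import (
		"context"
		"net/http"
		"time"

		"golang.org/x/oauth2"
	)

	func main() {
		// A base client whose timeout (and, if set, Jar and CheckRedirect)
		// should survive the OAuth2 wrapping.
		base := &http.Client{Timeout: 10 * time.Second}
		ctx := context.WithValue(context.Background(), oauth2.HTTPClient, base)

		ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})

		// After this change client.Timeout is 10s; before it, the timeout
		// was silently dropped and only base.Transport carried over.
		client := oauth2.NewClient(ctx, ts)
		_ = client
	}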
 
diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go
index 50593b6dfec6bc2aead53b65e4664994e1241547..6a95da975ce35dd9de545f8ce5d3d405fd14e6c4 100644
--- a/vendor/golang.org/x/oauth2/pkce.go
+++ b/vendor/golang.org/x/oauth2/pkce.go
@@ -21,7 +21,7 @@ const (
 //
 // A fresh verifier should be generated for each authorization.
 // S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
-// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange
+// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
 // (or Config.DeviceAccessToken).
 func GenerateVerifier() string {
 	// "RECOMMENDED that the output of a suitable random number generator be
@@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string {
 }
 
 // S256ChallengeOption derives a PKCE code challenge derived from verifier with
-// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess
+// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
 // only.
 func S256ChallengeOption(verifier string) AuthCodeOption {
 	return challengeOption{
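The two corrected references trace the PKCE round trip: the S256 challenge rides along with the authorization request, the plain verifier with the code exchange. A sketch of that flow; the endpoints, client ID, and callback code are placeholders, not values from this repository:

	package main

	import (
		"context"
		"fmt"

		"golang.org/x/oauth2"
	)

	func main() {
		conf := &oauth2.Config{
			ClientID:    "client-id",
			RedirectURL: "https://example.com/callback",
			Endpoint: oauth2.Endpoint{
				AuthURL:  "https://provider.example/authorize",
				TokenURL: "https://provider.example/token",
			},
		}

		// One fresh verifier per authorization.
		verifier := oauth2.GenerateVerifier()

		// The derived challenge goes out with the authorization URL...
		url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
		fmt.Println("visit:", url)

		// ...and the verifier itself goes out with the code exchange.
		code := "code-from-callback" // as delivered to RedirectURL
		tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
		_, _ = tok, err
	}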
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 109997d77cea5a7df4aef15479ac43c35bd02f7a..8c31136c402e58ad1ceac7e4878547e761727328 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -169,7 +169,7 @@ func tokenFromInternal(t *internal.Token) *Token {
 
 // retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
 // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
-// with an error..
+// with an error.
 func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
 	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get())
 	if err != nil {
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index f636667fb04258b937b90d77b1f0398a532d7067..14f89470ab5010ed6da1311a9ee7d2d0eed9113f 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -44,6 +44,8 @@ type Terminal struct {
 	// bytes, as an index into |line|). If it returns ok=false, the key
 	// press is processed normally. Otherwise it returns a replacement line
 	// and the new cursor position.
+	//
+	// This will be disabled during ReadPassword.
 	AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
 
 	// Escape contains a pointer to the escape codes for this terminal.
@@ -692,6 +694,8 @@ func (t *Terminal) Write(buf []byte) (n int, err error) {
 
 // ReadPassword temporarily changes the prompt and reads a password, without
 // echo, from the terminal.
+//
+// The AutoCompleteCallback is disabled during this call.
 func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
@@ -699,6 +703,11 @@ func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
 	oldPrompt := t.prompt
 	t.prompt = []rune(prompt)
 	t.echo = false
+	oldAutoCompleteCallback := t.AutoCompleteCallback
+	t.AutoCompleteCallback = nil
+	defer func() {
+		t.AutoCompleteCallback = oldAutoCompleteCallback
+	}()
 
 	line, err = t.readLine()
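The net effect of the lines added above: whatever AutoCompleteCallback is installed gets parked for the duration of readLine and restored on the way out, so completion logic can never rewrite a hidden password line. A sketch of a terminal using both features; raw-mode setup for stdin is elided, and in practice the ReadWriter is often an SSH channel:

	package main

	import (
		"fmt"
		"io"
		"os"

		"golang.org/x/term"
	)

	// rw joins a Reader and a Writer into the io.ReadWriter NewTerminal expects.
	type rw struct {
		io.Reader
		io.Writer
	}

	func main() {
		t := term.NewTerminal(rw{os.Stdin, os.Stdout}, "> ")

		// Tab completion stays active for ordinary reads...
		t.AutoCompleteCallback = func(line string, pos int, key rune) (string, int, bool) {
			if key == '\t' {
				completed := line + "-completed"
				return completed, len(completed), true
			}
			return "", 0, false
		}

		// ...but is suppressed, then restored, around this call.
		pw, err := t.ReadPassword("password: ")
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		_ = pw
	}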
 
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 93a798ab63704ab6b6f65e3861f656a2027b5428..794b2e32bfaa23989cc1135dff0c8d3ed4756a02 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int {
 // TokensAt returns the number of tokens available at time t.
 func (lim *Limiter) TokensAt(t time.Time) float64 {
 	lim.mu.Lock()
-	_, tokens := lim.advance(t) // does not mutate lim
+	tokens := lim.advance(t) // does not mutate lim
 	lim.mu.Unlock()
 	return tokens
 }
@@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) {
 		return
 	}
 	// advance time to now
-	t, tokens := r.lim.advance(t)
+	tokens := r.lim.advance(t)
 	// calculate new number of tokens
 	tokens += restoreTokens
 	if burst := float64(r.lim.burst); tokens > burst {
@@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) {
 	lim.mu.Lock()
 	defer lim.mu.Unlock()
 
-	t, tokens := lim.advance(t)
+	tokens := lim.advance(t)
 
 	lim.last = t
 	lim.tokens = tokens
@@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) {
 	lim.mu.Lock()
 	defer lim.mu.Unlock()
 
-	t, tokens := lim.advance(t)
+	tokens := lim.advance(t)
 
 	lim.last = t
 	lim.tokens = tokens
@@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
 		}
 	}
 
-	t, tokens := lim.advance(t)
+	tokens := lim.advance(t)
 
 	// Calculate the remaining number of tokens resulting from the request.
 	tokens -= float64(n)
@@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
 	return r
 }
 
-// advance calculates and returns an updated state for lim resulting from the passage of time.
+// advance calculates and returns an updated number of tokens for lim
+// resulting from the passage of time.
 // lim is not changed.
 // advance requires that lim.mu is held.
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
+func (lim *Limiter) advance(t time.Time) (newTokens float64) {
 	last := lim.last
 	if t.Before(last) {
 		last = t
@@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
 	if burst := float64(lim.burst); tokens > burst {
 		tokens = burst
 	}
-	return t, tokens
+	return tokens
 }
 
 // durationFromTokens is a unit conversion function from the number of tokens to the duration
@@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
 	if limit <= 0 {
 		return InfDuration
 	}
-	seconds := tokens / float64(limit)
-	return time.Duration(float64(time.Second) * seconds)
+
+	duration := (tokens / float64(limit)) * float64(time.Second)
+
+	// Cap the duration to the maximum representable int64 value, to avoid overflow.
+	if duration > float64(math.MaxInt64) {
+		return InfDuration
+	}
+
+	return time.Duration(duration)
 }
 
 // tokensFromDuration is a unit conversion function from a time duration to the number of tokens
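The new cap matters when tokens/limit is enormous: waiting for one token at a limit of 1e-10 tokens per second is about 1e19 nanoseconds, past the int64 range of time.Duration, so the float-to-int conversion could previously wrap to a meaningless (negative) duration. durationFromTokens is unexported, so this sketch simply mirrors the patched arithmetic:

	package main

	import (
		"fmt"
		"math"
		"time"
	)

	const infDuration = time.Duration(math.MaxInt64) // mirrors rate.InfDuration

	// cappedDurationFromTokens reproduces the conversion above: an oversized
	// tokens/limit ratio now saturates instead of overflowing int64.
	func cappedDurationFromTokens(tokens, limit float64) time.Duration {
		if limit <= 0 {
			return infDuration
		}
		d := (tokens / limit) * float64(time.Second)
		if d > float64(math.MaxInt64) {
			return infDuration
		}
		return time.Duration(d)
	}

	func main() {
		fmt.Println(cappedDurationFromTokens(1, 1e-10)) // capped at ~292 years
		fmt.Println(cappedDurationFromTokens(5, 10))    // 500ms
	}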
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
index f388426b08f7d51e91546d38b3881dc5821be734..d083dde3ed782a6f58e62e2c0c06ae2aa71edfd3 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
index 3cd9a5bb8e62b775aba82ddf18359a48f1186e52..e017ef07142923e4aedd407c563d2ca41c4d2c26 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -703,6 +703,65 @@ type QuotaFailure_Violation struct {
 	// For example: "Service disabled" or "Daily Limit for read operations
 	// exceeded".
 	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The API Service from which the `QuotaFailure.Violation` originates. In
+	// some cases, Quota issues originate from an API Service other than the one
+	// that was called. In other words, a dependency of the called API Service
+	// could be the cause of the `QuotaFailure`, and this field would have the
+	// dependency API service name.
+	//
+	// For example, if the called API is Kubernetes Engine API
+	// (container.googleapis.com), and a quota violation occurs in the
+	// Kubernetes Engine API itself, this field would be
+	// "container.googleapis.com". On the other hand, if the quota violation
+	// occurs when the Kubernetes Engine API creates VMs in the Compute Engine
+	// API (compute.googleapis.com), this field would be
+	// "compute.googleapis.com".
+	ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"`
+	// The metric of the violated quota. A quota metric is a named counter to
+	// measure usage, such as API requests or CPUs. When an activity occurs in a
+	// service, such as Virtual Machine allocation, one or more quota metrics
+	// may be affected.
+	//
+	// For example, "compute.googleapis.com/cpus_per_vm_family",
+	// "storage.googleapis.com/internet_egress_bandwidth".
+	QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"`
+	// The id of the violated quota. Also known as "limit name", this is the
+	// unique identifier of a quota in the context of an API service.
+	//
+	// For example, "CPUS-PER-VM-FAMILY-per-project-region".
+	QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"`
+	// The dimensions of the violated quota. Every non-global quota is enforced
+	// on a set of dimensions. While quota metric defines what to count, the
+	// dimensions specify for what aspects the counter should be increased.
+	//
+	// For example, the quota "CPUs per region per VM family" enforces a limit
+	// on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions
+	// "region" and "vm_family". And if the violation occurred in region
+	// "us-central1" and for VM family "n1", the quota_dimensions would be,
+	//
+	//	{
+	//	  "region": "us-central1",
+	//	  "vm_family": "n1",
+	//	}
+	//
+	// When a quota is enforced globally, the quota_dimensions would always be
+	// empty.
+	QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The enforced quota value at the time of the `QuotaFailure`.
+	//
+	// For example, if the enforced quota value at the time of the
+	// `QuotaFailure` on the number of CPUs is "10", then the value of this
+	// field would reflect this quantity.
+	QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"`
+	// The new quota value being rolled out at the time of the violation. At the
+	// completion of the rollout, this value will be enforced in place of
+	// quota_value. If no rollout is in progress at the time of the violation,
+	// this field is not set.
+	//
+	// For example, if at the time of the violation a rollout is in progress
+	// changing the number of CPUs quota from 10 to 20, 20 would be the value of
+	// this field.
+	FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"`
 }
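A sketch of how a client might consume the violation metadata added above once a server attaches a QuotaFailure to a gRPC status; the package name and output format are illustrative only:

	package quotadetails

	import (
		"fmt"

		"google.golang.org/genproto/googleapis/rpc/errdetails"
		"google.golang.org/grpc/status"
	)

	// ReportQuotaFailures walks the details attached to a gRPC error and
	// prints each violation's quota metadata.
	func ReportQuotaFailures(err error) {
		st, ok := status.FromError(err)
		if !ok {
			return // not a gRPC status error
		}
		for _, d := range st.Details() {
			qf, ok := d.(*errdetails.QuotaFailure)
			if !ok {
				continue
			}
			for _, v := range qf.GetViolations() {
				fmt.Printf("quota %q in %s: %s\n", v.GetQuotaId(), v.GetApiService(), v.GetDescription())
				for k, val := range v.GetQuotaDimensions() {
					fmt.Printf("  %s = %s\n", k, val)
				}
				fmt.Printf("  enforced value: %d\n", v.GetQuotaValue())
				if v.FutureQuotaValue != nil {
					fmt.Printf("  rolling out to: %d\n", v.GetFutureQuotaValue())
				}
			}
		}
	}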
 
 func (x *QuotaFailure_Violation) Reset() {
@@ -751,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string {
 	return ""
 }
 
+func (x *QuotaFailure_Violation) GetApiService() string {
+	if x != nil {
+		return x.ApiService
+	}
+	return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaMetric() string {
+	if x != nil {
+		return x.QuotaMetric
+	}
+	return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaId() string {
+	if x != nil {
+		return x.QuotaId
+	}
+	return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string {
+	if x != nil {
+		return x.QuotaDimensions
+	}
+	return nil
+}
+
+func (x *QuotaFailure_Violation) GetQuotaValue() int64 {
+	if x != nil {
+		return x.QuotaValue
+	}
+	return 0
+}
+
+func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 {
+	if x != nil && x.FutureQuotaValue != nil {
+		return *x.FutureQuotaValue
+	}
+	return 0
+}
+
 // A message type used to describe a single precondition failure.
 type PreconditionFailure_Violation struct {
 	state         protoimpl.MessageState
@@ -775,7 +876,7 @@ type PreconditionFailure_Violation struct {
 func (x *PreconditionFailure_Violation) Reset() {
 	*x = PreconditionFailure_Violation{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_rpc_error_details_proto_msgTypes[12]
+		mi := &file_google_rpc_error_details_proto_msgTypes[13]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -788,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string {
 func (*PreconditionFailure_Violation) ProtoMessage() {}
 
 func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_rpc_error_details_proto_msgTypes[12]
+	mi := &file_google_rpc_error_details_proto_msgTypes[13]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -886,7 +987,7 @@ type BadRequest_FieldViolation struct {
 func (x *BadRequest_FieldViolation) Reset() {
 	*x = BadRequest_FieldViolation{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_rpc_error_details_proto_msgTypes[13]
+		mi := &file_google_rpc_error_details_proto_msgTypes[14]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -899,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string {
 func (*BadRequest_FieldViolation) ProtoMessage() {}
 
 func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_rpc_error_details_proto_msgTypes[13]
+	mi := &file_google_rpc_error_details_proto_msgTypes[14]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -958,7 +1059,7 @@ type Help_Link struct {
 func (x *Help_Link) Reset() {
 	*x = Help_Link{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_rpc_error_details_proto_msgTypes[14]
+		mi := &file_google_rpc_error_details_proto_msgTypes[15]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -971,7 +1072,7 @@ func (x *Help_Link) String() string {
 func (*Help_Link) ProtoMessage() {}
 
 func (x *Help_Link) ProtoReflect() protoreflect.Message {
-	mi := &file_google_rpc_error_details_proto_msgTypes[14]
+	mi := &file_google_rpc_error_details_proto_msgTypes[15]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1029,79 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{
 	0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
 	0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72,
 	0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c,
+	0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c,
 	0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a,
 	0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
 	0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75,
 	0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61,
 	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
-	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
-	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72,
-	0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
-	0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
-	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
-	0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46,
-	0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09,
-	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a,
-	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
-	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61,
-	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
-	0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
-	0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46,
-	0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
-	0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69,
-	0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a,
-	0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
-	0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d,
-	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65,
-	0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75,
-	0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
-	0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
-	0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65,
-	0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65,
-	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65,
-	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
-	0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
-	0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04,
-	0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
-	0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
-	0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
-	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75,
-	0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a,
-	0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
-	0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73,
-	0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73,
-	0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61,
-	0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65,
-	0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
-	0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
-	0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50,
-	0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18,
+	0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70,
+	0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x71,
+	0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19,
+	0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f,
+	0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
+	0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69,
+	0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75,
+	0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a,
+	0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
+	0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31,
+	0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75,
+	0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01,
+	0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73,
+	0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65,
+	0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a,
+	0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69,
+	0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+	0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a,
+	0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+	0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a,
+	0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66,
+	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+	0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69,
+	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01,
+	0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73,
+	0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+	0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65,
+	0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a,
+	0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+	0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52,
+	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65,
+	0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+	0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72,
+	0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a,
+	0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a,
+	0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79,
+	0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e,
+	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75,
+	0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a,
+	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+	0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73,
+	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c,
+	0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b,
+	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10,
+	0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
+	0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73,
+	0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07,
+	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44,
+	0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
+	0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61,
+	0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02,
+	0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
@@ -1116,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
 	return file_google_rpc_error_details_proto_rawDescData
 }
 
-var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
 var file_google_rpc_error_details_proto_goTypes = []interface{}{
 	(*ErrorInfo)(nil),                     // 0: google.rpc.ErrorInfo
 	(*RetryInfo)(nil),                     // 1: google.rpc.RetryInfo
@@ -1130,24 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{
 	(*LocalizedMessage)(nil),              // 9: google.rpc.LocalizedMessage
 	nil,                                   // 10: google.rpc.ErrorInfo.MetadataEntry
 	(*QuotaFailure_Violation)(nil),        // 11: google.rpc.QuotaFailure.Violation
-	(*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation
-	(*BadRequest_FieldViolation)(nil),     // 13: google.rpc.BadRequest.FieldViolation
-	(*Help_Link)(nil),                     // 14: google.rpc.Help.Link
-	(*durationpb.Duration)(nil),           // 15: google.protobuf.Duration
+	nil,                                   // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
+	(*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation
+	(*BadRequest_FieldViolation)(nil),     // 14: google.rpc.BadRequest.FieldViolation
+	(*Help_Link)(nil),                     // 15: google.rpc.Help.Link
+	(*durationpb.Duration)(nil),           // 16: google.protobuf.Duration
 }
 var file_google_rpc_error_details_proto_depIdxs = []int32{
 	10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
-	15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
+	16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
 	11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
-	12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
-	13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
-	14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
-	9,  // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
-	7,  // [7:7] is the sub-list for method output_type
-	7,  // [7:7] is the sub-list for method input_type
-	7,  // [7:7] is the sub-list for extension type_name
-	7,  // [7:7] is the sub-list for extension extendee
-	0,  // [0:7] is the sub-list for field type_name
+	13, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
+	14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
+	15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
+	12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
+	9,  // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
+	8,  // [8:8] is the sub-list for method output_type
+	8,  // [8:8] is the sub-list for method input_type
+	8,  // [8:8] is the sub-list for extension type_name
+	8,  // [8:8] is the sub-list for extension extendee
+	0,  // [0:8] is the sub-list for field type_name
 }
 
 func init() { file_google_rpc_error_details_proto_init() }
@@ -1288,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() {
 				return nil
 			}
 		}
-		file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+		file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*PreconditionFailure_Violation); i {
 			case 0:
 				return &v.state
@@ -1300,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() {
 				return nil
 			}
 		}
-		file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+		file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*BadRequest_FieldViolation); i {
 			case 0:
 				return &v.state
@@ -1312,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() {
 				return nil
 			}
 		}
-		file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+		file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*Help_Link); i {
 			case 0:
 				return &v.state
@@ -1325,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() {
 			}
 		}
 	}
+	file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
 			NumEnums:      0,
-			NumMessages:   15,
+			NumMessages:   16,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
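For orientation: the regenerated bytes above encode two additions to `google.rpc.QuotaFailure.Violation`, a `quota_dimensions` map (hence the new `QuotaDimensionsEntry` nested type and the message count bump from 15 to 16) and a proto3 `optional future_quota_value` (hence the new `OneofWrappers` registration). A minimal sketch of reading those fields from a gRPC error, assuming the standard protoc-gen-go getters and grpc-go's `status` package:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

// inspectQuotaFailure prints the QuotaFailure details attached to a gRPC
// error, including the fields added by this regeneration. Illustrative only.
func inspectQuotaFailure(err error) {
	st, ok := status.FromError(err)
	if !ok {
		return
	}
	for _, d := range st.Details() {
		qf, ok := d.(*errdetails.QuotaFailure)
		if !ok {
			continue
		}
		for _, v := range qf.GetViolations() {
			fmt.Println("subject:", v.GetSubject())
			// New map field (tag 6): per-violation quota dimensions.
			for k, val := range v.GetQuotaDimensions() {
				fmt.Printf("  %s=%s\n", k, val)
			}
			// New optional field (tag 8): check presence via the pointer,
			// since the getter returns 0 when the field is unset.
			if v.FutureQuotaValue != nil {
				fmt.Println("  future quota value:", v.GetFutureQuotaValue())
			}
		}
	}
}

func main() {
	inspectQuotaFailure(nil) // wire up to a real RPC error in practice
}
```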
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index 6ad1b1c1df017b943d16e81755e7146510380bcf..06a3f71063360269a92e87c8e19479730c44518e 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
index fc45634887b1c8f27017a6e71d878637595a65c8..10f0b385fa10ae86e193779306923546096d31d4 100644
--- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
+++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
@@ -29,10 +29,10 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/protobuf/proto"
 	openapi_v3 "github.com/google/gnostic-models/openapiv3"
 	"github.com/google/uuid"
 	"github.com/munnerz/goautoneg"
+	"google.golang.org/protobuf/proto"
 
 	"k8s.io/klog/v2"
 	"k8s.io/kube-openapi/pkg/cached"
diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go
index 08b6246cebbdce6767556c219966f7efaef5f004..25e4fd09ebf3441e7f66e17a4703c16d3f07ada0 100644
--- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go
+++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go
@@ -4,7 +4,7 @@ import (
 	"math/rand"
 	"strings"
 
-	fuzz "github.com/google/gofuzz"
+	"sigs.k8s.io/randfill"
 
 	"k8s.io/kube-openapi/pkg/validation/spec"
 )
@@ -25,15 +25,15 @@ func randAlphanumString() string {
 }
 
 var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
-	func(s *string, c fuzz.Continue) {
+	func(s *string, c randfill.Continue) {
 		// All OpenAPI V3 map keys must follow the corresponding
 		// regex. Note that this restricts the range for all other
 		// string values as well.
 		str := randAlphanumString()
 		*s = str
 	},
-	func(o *OpenAPI, c fuzz.Continue) {
-		c.FuzzNoCustom(o)
+	func(o *OpenAPI, c randfill.Continue) {
+		c.FillNoCustom(o)
 		o.Version = "3.0.0"
 		for i, val := range o.SecurityRequirement {
 			if val == nil {
@@ -48,45 +48,45 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
 		}
 
 	},
-	func(r *interface{}, c fuzz.Continue) {
+	func(r *interface{}, c randfill.Continue) {
 		switch c.Intn(3) {
 		case 0:
 			*r = nil
 		case 1:
-			n := c.RandString() + "x"
+			n := c.String(0) + "x"
 			*r = n
 		case 2:
 			n := c.Float64()
 			*r = n
 		}
 	},
-	func(v **spec.Info, c fuzz.Continue) {
+	func(v **spec.Info, c randfill.Continue) {
 		// Info is never nil
 		*v = &spec.Info{}
-		c.FuzzNoCustom(*v)
-		(*v).Title = c.RandString() + "x"
+		c.FillNoCustom(*v)
+		(*v).Title = c.String(0) + "x"
 	},
-	func(v *Paths, c fuzz.Continue) {
-		c.Fuzz(&v.VendorExtensible)
+	func(v *Paths, c randfill.Continue) {
+		c.Fill(&v.VendorExtensible)
 		num := c.Intn(5)
 		if num > 0 {
 			v.Paths = make(map[string]*Path)
 		}
 		for i := 0; i < num; i++ {
 			val := Path{}
-			c.Fuzz(&val)
-			v.Paths["/"+c.RandString()] = &val
+			c.Fill(&val)
+			v.Paths["/"+c.String(0)] = &val
 		}
 	},
-	func(v *SecurityScheme, c fuzz.Continue) {
+	func(v *SecurityScheme, c randfill.Continue) {
 		if c.Intn(refChance) == 0 {
-			c.Fuzz(&v.Refable)
+			c.Fill(&v.Refable)
 			return
 		}
 		switch c.Intn(4) {
 		case 0:
 			v.Type = "apiKey"
-			v.Name = c.RandString() + "x"
+			v.Name = c.String(0) + "x"
 			switch c.Intn(3) {
 			case 0:
 				v.In = "query"
@@ -101,17 +101,17 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
 			v.Type = "oauth2"
 			v.Flows = make(map[string]*OAuthFlow)
 			flow := OAuthFlow{}
-			flow.AuthorizationUrl = c.RandString() + "x"
+			flow.AuthorizationUrl = c.String(0) + "x"
 			v.Flows["implicit"] = &flow
 			flow.Scopes = make(map[string]string)
 			flow.Scopes["foo"] = "bar"
 		case 3:
 			v.Type = "openIdConnect"
-			v.OpenIdConnectUrl = "https://" + c.RandString()
+			v.OpenIdConnectUrl = "https://" + c.String(0)
 		}
 		v.Scheme = "basic"
 	},
-	func(v *spec.Ref, c fuzz.Continue) {
+	func(v *spec.Ref, c randfill.Continue) {
 		switch c.Intn(7) {
 		case 0:
 			*v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString())
@@ -127,13 +127,13 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
 			*v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString())
 		}
 	},
-	func(v *Parameter, c fuzz.Continue) {
+	func(v *Parameter, c randfill.Continue) {
 		if c.Intn(refChance) == 0 {
-			c.Fuzz(&v.Refable)
+			c.Fill(&v.Refable)
 			return
 		}
-		c.Fuzz(&v.ParameterProps)
-		c.Fuzz(&v.VendorExtensible)
+		c.Fill(&v.ParameterProps)
+		c.Fill(&v.VendorExtensible)
 
 		switch c.Intn(3) {
 		case 0:
@@ -145,44 +145,44 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
 			v.In = "cookie"
 		}
 	},
-	func(v *RequestBody, c fuzz.Continue) {
+	func(v *RequestBody, c randfill.Continue) {
 		if c.Intn(refChance) == 0 {
-			c.Fuzz(&v.Refable)
+			c.Fill(&v.Refable)
 			return
 		}
-		c.Fuzz(&v.RequestBodyProps)
-		c.Fuzz(&v.VendorExtensible)
+		c.Fill(&v.RequestBodyProps)
+		c.Fill(&v.VendorExtensible)
 	},
-	func(v *Header, c fuzz.Continue) {
+	func(v *Header, c randfill.Continue) {
 		if c.Intn(refChance) == 0 {
-			c.Fuzz(&v.Refable)
+			c.Fill(&v.Refable)
 			return
 		}
-		c.Fuzz(&v.HeaderProps)
-		c.Fuzz(&v.VendorExtensible)
+		c.Fill(&v.HeaderProps)
+		c.Fill(&v.VendorExtensible)
 	},
-	func(v *ResponsesProps, c fuzz.Continue) {
-		c.Fuzz(&v.Default)
+	func(v *ResponsesProps, c randfill.Continue) {
+		c.Fill(&v.Default)
 		n := c.Intn(5)
 		for i := 0; i < n; i++ {
 			r2 := Response{}
-			c.Fuzz(&r2)
+			c.Fill(&r2)
 			// HTTP Status code in 100-599 Range
 			code := c.Intn(500) + 100
 			v.StatusCodeResponses = make(map[int]*Response)
 			v.StatusCodeResponses[code] = &r2
 		}
 	},
-	func(v *Response, c fuzz.Continue) {
+	func(v *Response, c randfill.Continue) {
 		if c.Intn(refChance) == 0 {
-			c.Fuzz(&v.Refable)
+			c.Fill(&v.Refable)
 			return
 		}
-		c.Fuzz(&v.ResponseProps)
-		c.Fuzz(&v.VendorExtensible)
+		c.Fill(&v.ResponseProps)
+		c.Fill(&v.VendorExtensible)
 	},
-	func(v *Operation, c fuzz.Continue) {
-		c.FuzzNoCustom(v)
+	func(v *Operation, c randfill.Continue) {
+		c.FillNoCustom(v)
 		// Do not fuzz null values into the array.
 		for i, val := range v.SecurityRequirement {
 			if val == nil {
@@ -196,85 +196,85 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
 			}
 		}
 	},
-	func(v *spec.Extensions, c fuzz.Continue) {
+	func(v *spec.Extensions, c randfill.Continue) {
 		numChildren := c.Intn(5)
 		for i := 0; i < numChildren; i++ {
 			if *v == nil {
 				*v = spec.Extensions{}
 			}
-			(*v)["x-"+c.RandString()] = c.RandString()
+			(*v)["x-"+c.String(0)] = c.String(0)
 		}
 	},
-	func(v *spec.ExternalDocumentation, c fuzz.Continue) {
-		c.Fuzz(&v.Description)
+	func(v *spec.ExternalDocumentation, c randfill.Continue) {
+		c.Fill(&v.Description)
 		v.URL = "https://" + randAlphanumString()
 	},
-	func(v *spec.SchemaURL, c fuzz.Continue) {
+	func(v *spec.SchemaURL, c randfill.Continue) {
 		*v = spec.SchemaURL("https://" + randAlphanumString())
 	},
-	func(v *spec.SchemaOrBool, c fuzz.Continue) {
+	func(v *spec.SchemaOrBool, c randfill.Continue) {
 		*v = spec.SchemaOrBool{}
 
-		if c.RandBool() {
-			v.Allows = c.RandBool()
+		if c.Bool() {
+			v.Allows = c.Bool()
 		} else {
 			v.Schema = &spec.Schema{}
 			v.Allows = true
-			c.Fuzz(&v.Schema)
+			c.Fill(&v.Schema)
 		}
 	},
-	func(v *spec.SchemaOrArray, c fuzz.Continue) {
+	func(v *spec.SchemaOrArray, c randfill.Continue) {
 		*v = spec.SchemaOrArray{}
-		if c.RandBool() {
+		if c.Bool() {
 			schema := spec.Schema{}
-			c.Fuzz(&schema)
+			c.Fill(&schema)
 			v.Schema = &schema
 		} else {
 			v.Schemas = []spec.Schema{}
 			numChildren := c.Intn(5)
 			for i := 0; i < numChildren; i++ {
 				schema := spec.Schema{}
-				c.Fuzz(&schema)
+				c.Fill(&schema)
 				v.Schemas = append(v.Schemas, schema)
 			}
 
 		}
 
 	},
-	func(v *spec.SchemaOrStringArray, c fuzz.Continue) {
-		if c.RandBool() {
+	func(v *spec.SchemaOrStringArray, c randfill.Continue) {
+		if c.Bool() {
 			*v = spec.SchemaOrStringArray{}
-			if c.RandBool() {
-				c.Fuzz(&v.Property)
+			if c.Bool() {
+				c.Fill(&v.Property)
 			} else {
-				c.Fuzz(&v.Schema)
+				c.Fill(&v.Schema)
 			}
 		}
 	},
-	func(v *spec.Schema, c fuzz.Continue) {
+	func(v *spec.Schema, c randfill.Continue) {
 		if c.Intn(refChance) == 0 {
-			c.Fuzz(&v.Ref)
+			c.Fill(&v.Ref)
 			return
 		}
-		if c.RandBool() {
+		if c.Bool() {
 			// file schema
-			c.Fuzz(&v.Default)
-			c.Fuzz(&v.Description)
-			c.Fuzz(&v.Example)
-			c.Fuzz(&v.ExternalDocs)
+			c.Fill(&v.Default)
+			c.Fill(&v.Description)
+			c.Fill(&v.Example)
+			c.Fill(&v.ExternalDocs)
 
-			c.Fuzz(&v.Format)
-			c.Fuzz(&v.ReadOnly)
-			c.Fuzz(&v.Required)
-			c.Fuzz(&v.Title)
+			c.Fill(&v.Format)
+			c.Fill(&v.ReadOnly)
+			c.Fill(&v.Required)
+			c.Fill(&v.Title)
 			v.Type = spec.StringOrArray{"file"}
 
 		} else {
 			// normal schema
-			c.Fuzz(&v.SchemaProps)
-			c.Fuzz(&v.SwaggerSchemaProps)
-			c.Fuzz(&v.VendorExtensible)
-			c.Fuzz(&v.ExtraProps)
+			c.Fill(&v.SchemaProps)
+			c.Fill(&v.SwaggerSchemaProps)
+			c.Fill(&v.VendorExtensible)
+			c.Fill(&v.ExtraProps)
 		}
 
 	},
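Everything in this file is a mechanical migration from `github.com/google/gofuzz` to its Kubernetes fork `sigs.k8s.io/randfill`: `Fuzz` becomes `Fill`, `FuzzNoCustom` becomes `FillNoCustom`, `RandString()` becomes `String(0)`, and `RandBool()` becomes `Bool()`. A standalone sketch of the renamed API, using a hypothetical `sample` type:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/randfill"
)

type sample struct {
	Name  string
	Count int
}

func main() {
	f := randfill.New().Funcs(
		// A custom fill function in the same style as the ones above.
		func(s *sample, c randfill.Continue) {
			c.FillNoCustom(s)          // was c.FuzzNoCustom(s)
			s.Name = c.String(0) + "x" // was c.RandString() + "x"
		},
	)
	var s sample
	f.Fill(&s) // was f.Fuzz(&s)
	fmt.Printf("%+v\n", s)
}
```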
diff --git a/vendor/lukechampine.com/uint128/uint128.go b/vendor/lukechampine.com/uint128/uint128.go
index 04e65783ab9b4d54e62066daf5bf8fe42b524956..31b5703088a6bc37b72888c96722db25724d5832 100644
--- a/vendor/lukechampine.com/uint128/uint128.go
+++ b/vendor/lukechampine.com/uint128/uint128.go
@@ -379,6 +379,12 @@ func (u Uint128) PutBytes(b []byte) {
 	binary.LittleEndian.PutUint64(b[8:], u.Hi)
 }
 
+// PutBytesBE stores u in b in big-endian order. It panics if len(b) < 16.
+func (u Uint128) PutBytesBE(b []byte) {
+	binary.BigEndian.PutUint64(b[:8], u.Hi)
+	binary.BigEndian.PutUint64(b[8:], u.Lo)
+}
+
 // Big returns u as a *big.Int.
 func (u Uint128) Big() *big.Int {
 	i := new(big.Int).SetUint64(u.Hi)
@@ -420,6 +426,14 @@ func FromBytes(b []byte) Uint128 {
 	)
 }
 
+// FromBytesBE converts big-endian b to a Uint128 value.
+func FromBytesBE(b []byte) Uint128 {
+	return New(
+		binary.BigEndian.Uint64(b[8:]),
+		binary.BigEndian.Uint64(b[:8]),
+	)
+}
+
 // FromBig converts i to a Uint128 value. It panics if i is negative or
 // overflows 128 bits.
 func FromBig(i *big.Int) (u Uint128) {
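The two new helpers mirror the existing little-endian `PutBytes`/`FromBytes` in network byte order; the natural use case is treating a 16-byte IPv6 address as a 128-bit integer. A small sketch, not part of the diff:

```go
package main

import (
	"fmt"
	"net/netip"

	"lukechampine.com/uint128"
)

func main() {
	addr := netip.MustParseAddr("2001:db8::1")
	b := addr.As16()

	u := uint128.FromBytesBE(b[:]) // big-endian bytes -> 128-bit integer
	u = u.Add64(1)                 // arithmetic on the address

	var out [16]byte
	u.PutBytesBE(out[:])               // 128-bit integer -> big-endian bytes
	fmt.Println(netip.AddrFrom16(out)) // 2001:db8::2
}
```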
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 69839927a9bb4208775cedbea4ededdeb0f4d8f0..f1c4b11f0014e48e04b5bfa85a6ce588c87c0f0c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -32,8 +32,8 @@ github.com/caarlos0/env/v11
 # github.com/cenkalti/backoff/v4 v4.3.0
 ## explicit; go 1.18
 github.com/cenkalti/backoff/v4
-# github.com/cenkalti/hub v1.0.1
-## explicit
+# github.com/cenkalti/hub v1.0.2
+## explicit; go 1.20
 github.com/cenkalti/hub
 # github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984
 ## explicit
@@ -89,14 +89,17 @@ github.com/cpuguy83/go-md2man/v2/md2man
 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
 github.com/davecgh/go-spew/spew
+# github.com/dennwc/varint v1.0.0
+## explicit; go 1.12
+github.com/dennwc/varint
 # github.com/dustin/go-humanize v1.0.1
 ## explicit; go 1.16
 github.com/dustin/go-humanize
-# github.com/emicklei/go-restful/v3 v3.12.1
+# github.com/emicklei/go-restful/v3 v3.12.2
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
 github.com/emicklei/go-restful/v3/log
-# github.com/evanphx/json-patch/v5 v5.9.0
+# github.com/evanphx/json-patch/v5 v5.9.11
 ## explicit; go 1.18
 github.com/evanphx/json-patch/v5
 github.com/evanphx/json-patch/v5/internal/json
@@ -104,8 +107,8 @@ github.com/evanphx/json-patch/v5/internal/json
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
 github.com/fsnotify/fsnotify/internal
-# github.com/fxamacker/cbor/v2 v2.7.0
-## explicit; go 1.17
+# github.com/fxamacker/cbor/v2 v2.8.0
+## explicit; go 1.20
 github.com/fxamacker/cbor/v2
 # github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424
 ## explicit
@@ -122,7 +125,7 @@ github.com/go-kit/kit/log/logrus
 ## explicit; go 1.17
 github.com/go-kit/log
 github.com/go-kit/log/level
-# github.com/go-logfmt/logfmt v0.5.1
+# github.com/go-logfmt/logfmt v0.6.0
 ## explicit; go 1.17
 github.com/go-logfmt/logfmt
 # github.com/go-logr/logr v1.4.2
@@ -132,14 +135,14 @@ github.com/go-logr/logr/funcr
 # github.com/go-logr/stdr v1.2.2
 ## explicit; go 1.16
 github.com/go-logr/stdr
-# github.com/go-openapi/jsonpointer v0.21.0
+# github.com/go-openapi/jsonpointer v0.21.1
 ## explicit; go 1.20
 github.com/go-openapi/jsonpointer
 # github.com/go-openapi/jsonreference v0.21.0
 ## explicit; go 1.20
 github.com/go-openapi/jsonreference
 github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/swag v0.23.0
+# github.com/go-openapi/swag v0.23.1
 ## explicit; go 1.20
 github.com/go-openapi/swag
 # github.com/goccy/go-json v0.10.5
@@ -163,15 +166,11 @@ github.com/gogo/protobuf/types
 # github.com/golang/protobuf v1.5.4
 ## explicit; go 1.17
 github.com/golang/protobuf/proto
-github.com/golang/protobuf/ptypes
-github.com/golang/protobuf/ptypes/any
-github.com/golang/protobuf/ptypes/duration
-github.com/golang/protobuf/ptypes/timestamp
-# github.com/golang/snappy v0.0.4
+# github.com/golang/snappy v1.0.0
 ## explicit
 github.com/golang/snappy
-# github.com/google/gnostic-models v0.6.8
-## explicit; go 1.18
+# github.com/google/gnostic-models v0.6.9
+## explicit; go 1.21
 github.com/google/gnostic-models/compiler
 github.com/google/gnostic-models/extensions
 github.com/google/gnostic-models/jsonschema
@@ -195,11 +194,15 @@ github.com/google/uuid
 ## explicit; go 1.22.0
 github.com/gopacket/gopacket
 github.com/gopacket/gopacket/layers
-# github.com/gorilla/websocket v1.5.0
+# github.com/gorilla/websocket v1.5.3
 ## explicit; go 1.12
 github.com/gorilla/websocket
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1
-## explicit; go 1.22
+# github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
+## explicit; go 1.21
+github.com/grafana/regexp
+github.com/grafana/regexp/syntax
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+## explicit; go 1.23.0
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -239,11 +242,11 @@ github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/klauspost/cpuid/v2 v2.2.10
 ## explicit; go 1.22
 github.com/klauspost/cpuid/v2
-# github.com/libp2p/go-reuseport v0.3.0
-## explicit; go 1.19
+# github.com/libp2p/go-reuseport v0.4.0
+## explicit; go 1.20
 github.com/libp2p/go-reuseport
-# github.com/mailru/easyjson v0.7.7
-## explicit; go 1.12
+# github.com/mailru/easyjson v0.9.0
+## explicit; go 1.20
 github.com/mailru/easyjson/buffer
 github.com/mailru/easyjson/jlexer
 github.com/mailru/easyjson/jwriter
@@ -259,7 +262,7 @@ github.com/minio/crc64nvme
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.89
+# github.com/minio/minio-go/v7 v7.0.90
 ## explicit; go 1.23.0
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/cors
@@ -295,7 +298,7 @@ github.com/mwitkow/go-conntrack
 # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
 ## explicit
 github.com/mxk/go-flowrate/flowrate
-# github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20250407055901-c9a2490f9235
+# github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20250407055901-c9a2490f9235 => github.com/jotak/flowlogs-pipeline v0.0.0-20250425114852-1cb601f3ba70
 ## explicit; go 1.23.0
 github.com/netobserv/flowlogs-pipeline/pkg/api
 github.com/netobserv/flowlogs-pipeline/pkg/config
@@ -331,8 +334,8 @@ github.com/netobserv/flowlogs-pipeline/pkg/utils/filters
 ## explicit; go 1.18
 github.com/netobserv/gopipes/pkg/node
 github.com/netobserv/gopipes/pkg/node/internal/connect
-# github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500
-## explicit; go 1.15
+# github.com/netobserv/loki-client-go v0.0.0-20250425113517-526b43e51847
+## explicit; go 1.23.0
 github.com/netobserv/loki-client-go/loki
 github.com/netobserv/loki-client-go/pkg/backoff
 github.com/netobserv/loki-client-go/pkg/helpers
@@ -409,8 +412,8 @@ github.com/pion/dtls/v2/pkg/protocol/alert
 github.com/pion/dtls/v2/pkg/protocol/extension
 github.com/pion/dtls/v2/pkg/protocol/handshake
 github.com/pion/dtls/v2/pkg/protocol/recordlayer
-# github.com/pion/logging v0.2.2
-## explicit; go 1.12
+# github.com/pion/logging v0.2.3
+## explicit; go 1.20
 github.com/pion/logging
 # github.com/pion/transport/v2 v2.2.10
 ## explicit; go 1.12
@@ -425,39 +428,45 @@ github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.21.1
-## explicit; go 1.21
+# github.com/prometheus/client_golang v1.22.0
+## explicit; go 1.22
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/collectors
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+github.com/prometheus/client_golang/prometheus/promhttp/internal
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.62.0
+# github.com/prometheus/common v0.63.0
 ## explicit; go 1.21
 github.com/prometheus/common/config
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
 github.com/prometheus/common/version
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.16.0
+## explicit; go 1.21
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24
-## explicit; go 1.14
-github.com/prometheus/prometheus/pkg/labels
-github.com/prometheus/prometheus/pkg/value
+# github.com/prometheus/prometheus v0.303.0
+## explicit; go 1.23.0
+github.com/prometheus/prometheus/model/exemplar
+github.com/prometheus/prometheus/model/histogram
+github.com/prometheus/prometheus/model/labels
+github.com/prometheus/prometheus/model/metadata
+github.com/prometheus/prometheus/model/timestamp
+github.com/prometheus/prometheus/model/value
 github.com/prometheus/prometheus/promql/parser
+github.com/prometheus/prometheus/promql/parser/posrange
 github.com/prometheus/prometheus/storage
 github.com/prometheus/prometheus/tsdb/chunkenc
 github.com/prometheus/prometheus/tsdb/chunks
 github.com/prometheus/prometheus/tsdb/errors
 github.com/prometheus/prometheus/tsdb/fileutil
-github.com/prometheus/prometheus/tsdb/tsdbutil
+github.com/prometheus/prometheus/util/annotations
 github.com/prometheus/prometheus/util/strutil
 # github.com/rs/xid v1.6.0
 ## explicit; go 1.16
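One consequence of jumping Prometheus from the 2020-era pseudo-version to v0.303.0 is visible in the package list above: `pkg/labels` and `pkg/value` moved to `model/labels` and `model/value`, so downstream imports change paths even where the API is unchanged. A sketch of the new import path (label values are illustrative):

```go
package main

import (
	"fmt"

	// Previously: github.com/prometheus/prometheus/pkg/labels
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	ls := labels.FromStrings("__name__", "flows_total", "job", "netobserv")
	fmt.Println(ls.Get("job")) // "netobserv"
}
```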
@@ -465,7 +474,7 @@ github.com/rs/xid
 # github.com/russross/blackfriday/v2 v2.1.0
 ## explicit
 github.com/russross/blackfriday/v2
-# github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91
+# github.com/safchain/ethtool v0.5.10
 ## explicit; go 1.16
 github.com/safchain/ethtool
 # github.com/segmentio/kafka-go v0.4.47
@@ -539,17 +548,17 @@ github.com/stretchr/testify/assert
 github.com/stretchr/testify/assert/yaml
 github.com/stretchr/testify/mock
 github.com/stretchr/testify/require
-# github.com/urfave/cli/v2 v2.27.2
+# github.com/urfave/cli/v2 v2.27.6
 ## explicit; go 1.18
 github.com/urfave/cli/v2
-# github.com/vishvananda/netlink v1.3.0
+# github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa
 ## explicit; go 1.12
 github.com/vishvananda/netlink
 github.com/vishvananda/netlink/nl
 # github.com/vishvananda/netns v0.0.5
 ## explicit; go 1.17
 github.com/vishvananda/netns
-# github.com/vladimirvivien/gexe v0.4.1
+# github.com/vladimirvivien/gexe v0.5.0
 ## explicit; go 1.23
 github.com/vladimirvivien/gexe
 github.com/vladimirvivien/gexe/exec
@@ -559,7 +568,7 @@ github.com/vladimirvivien/gexe/net
 github.com/vladimirvivien/gexe/prog
 github.com/vladimirvivien/gexe/str
 github.com/vladimirvivien/gexe/vars
-# github.com/vmware/go-ipfix v0.13.0
+# github.com/vmware/go-ipfix v0.14.0
 ## explicit; go 1.23.0
 github.com/vmware/go-ipfix/pkg/collector
 github.com/vmware/go-ipfix/pkg/entities
@@ -577,8 +586,8 @@ github.com/xdg-go/scram
 # github.com/xdg-go/stringprep v1.0.4
 ## explicit; go 1.11
 github.com/xdg-go/stringprep
-# github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913
-## explicit; go 1.15.0
+# github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1
+## explicit; go 1.15
 github.com/xrash/smetrics
 # go.opentelemetry.io/auto/sdk v1.1.0
 ## explicit; go 1.22.0
@@ -617,14 +626,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/trans
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
@@ -668,17 +677,17 @@ go.opentelemetry.io/proto/otlp/logs/v1
 go.opentelemetry.io/proto/otlp/metrics/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
-# go.uber.org/atomic v1.9.0
-## explicit; go 1.13
+# go.uber.org/atomic v1.11.0
+## explicit; go 1.18
 go.uber.org/atomic
-# golang.org/x/crypto v0.36.0
+# golang.org/x/crypto v0.37.0
 ## explicit; go 1.23.0
 golang.org/x/crypto/argon2
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/cryptobyte
 golang.org/x/crypto/cryptobyte/asn1
 golang.org/x/crypto/curve25519
-# golang.org/x/net v0.37.0
+# golang.org/x/net v0.39.0
 ## explicit; go 1.23.0
 golang.org/x/net/bpf
 golang.org/x/net/context
@@ -700,8 +709,8 @@ golang.org/x/net/proxy
 golang.org/x/net/publicsuffix
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.26.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.29.0
+## explicit; go 1.23.0
 golang.org/x/oauth2
 golang.org/x/oauth2/clientcredentials
 golang.org/x/oauth2/internal
@@ -712,23 +721,23 @@ golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.30.0
+# golang.org/x/term v0.31.0
 ## explicit; go 1.23.0
 golang.org/x/term
-# golang.org/x/text v0.23.0
+# golang.org/x/text v0.24.0
 ## explicit; go 1.23.0
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/time v0.7.0
-## explicit; go 1.18
+# golang.org/x/time v0.11.0
+## explicit; go 1.23.0
 golang.org/x/time/rate
-# google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a
-## explicit; go 1.22
+# google.golang.org/genproto/googleapis/api v0.0.0-20250421163800-61c742ae3ef0
+## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a
-## explicit; go 1.22
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e
+## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
 # google.golang.org/grpc v1.71.1
@@ -1260,7 +1269,7 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/watchlist
 k8s.io/client-go/util/workqueue
-# k8s.io/component-base v0.32.1
+# k8s.io/component-base v0.32.3
 ## explicit; go 1.23.0
 k8s.io/component-base/cli/flag
 k8s.io/component-base/featuregate
@@ -1280,8 +1289,8 @@ k8s.io/klog/v2/internal/severity
 k8s.io/klog/v2/internal/sloghandler
 k8s.io/klog/v2/internal/verbosity
 k8s.io/klog/v2/textlogger
-# k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
-## explicit; go 1.20
+# k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff
+## explicit; go 1.21
 k8s.io/kube-openapi/pkg/cached
 k8s.io/kube-openapi/pkg/common
 k8s.io/kube-openapi/pkg/handler3
@@ -1302,10 +1311,10 @@ k8s.io/utils/net
 k8s.io/utils/pointer
 k8s.io/utils/ptr
 k8s.io/utils/trace
-# lukechampine.com/uint128 v1.2.0
+# lukechampine.com/uint128 v1.3.0
 ## explicit; go 1.12
 lukechampine.com/uint128
-# sigs.k8s.io/controller-runtime v0.20.0
+# sigs.k8s.io/controller-runtime v0.20.4
 ## explicit; go 1.23.0
 sigs.k8s.io/controller-runtime/pkg/client
 sigs.k8s.io/controller-runtime/pkg/client/apiutil
@@ -1332,11 +1341,15 @@ sigs.k8s.io/e2e-framework/pkg/utils
 sigs.k8s.io/e2e-framework/support
 sigs.k8s.io/e2e-framework/support/kind
 sigs.k8s.io/e2e-framework/third_party/kind
-# sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3
-## explicit; go 1.21
+# sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8
+## explicit; go 1.23
 sigs.k8s.io/json
 sigs.k8s.io/json/internal/golang/encoding/json
-# sigs.k8s.io/structured-merge-diff/v4 v4.4.2
+# sigs.k8s.io/randfill v1.0.0
+## explicit; go 1.18
+sigs.k8s.io/randfill
+sigs.k8s.io/randfill/bytesource
+# sigs.k8s.io/structured-merge-diff/v4 v4.7.0
 ## explicit; go 1.13
 sigs.k8s.io/structured-merge-diff/v4/fieldpath
 sigs.k8s.io/structured-merge-diff/v4/merge
@@ -1347,3 +1360,4 @@ sigs.k8s.io/structured-merge-diff/v4/value
 ## explicit; go 1.12
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
+# github.com/netobserv/flowlogs-pipeline => github.com/jotak/flowlogs-pipeline v0.0.0-20250425114852-1cb601f3ba70
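The trailing `=>` annotations in `modules.txt` record that vendoring resolved `github.com/netobserv/flowlogs-pipeline` through a module replacement pointing at the `jotak` fork. For orientation, that corresponds to a go.mod directive of this shape (shown as context; this hunk only records its effect):

```
replace github.com/netobserv/flowlogs-pipeline => github.com/jotak/flowlogs-pipeline v0.0.0-20250425114852-1cb601f3ba70
```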
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go
index ad898617fa9d077ae9bce26ce0b237241cfa05e9..7a7a0d114551eadd24641370009bb2419f08b9c7 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go
@@ -246,10 +246,18 @@ func (m *mapper) addGroupVersionResourcesToCacheAndReloadLocked(gvr map[schema.G
 		}
 
 		if !found {
-			groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{
+			gv := metav1.GroupVersionForDiscovery{
 				GroupVersion: metav1.GroupVersion{Group: groupVersion.Group, Version: version}.String(),
 				Version:      version,
-			})
+			}
+
+			// Prepend if preferred version, else append. The upstream DiscoveryRestMapper assumes
+			// the first version is the preferred one: https://github.com/kubernetes/kubernetes/blob/ef54ac803b712137871c1a1f8d635d50e69ffa6c/staging/src/k8s.io/apimachinery/pkg/api/meta/restmapper.go#L458-L461
+			if group, ok := m.apiGroups[groupVersion.Group]; ok && group.PreferredVersion.Version == version {
+				groupResources.Group.Versions = append([]metav1.GroupVersionForDiscovery{gv}, groupResources.Group.Versions...)
+			} else {
+				groupResources.Group.Versions = append(groupResources.Group.Versions, gv)
+			}
 		}
 
 		// Update data in the cache.
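The reordering above exists because the discovery-based REST mapper treats `Versions[0]` as the group's preferred version (see the linked upstream comment), so the cache must prepend the preferred version rather than blindly append. A standalone sketch of that ordering rule, with a hypothetical `insertVersion` helper:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// insertVersion mirrors the prepend-if-preferred logic: the preferred
// version must land at index 0 because consumers treat the first entry
// as the group's preferred version.
func insertVersion(versions []metav1.GroupVersionForDiscovery, gv metav1.GroupVersionForDiscovery, preferred bool) []metav1.GroupVersionForDiscovery {
	if preferred {
		return append([]metav1.GroupVersionForDiscovery{gv}, versions...)
	}
	return append(versions, gv)
}

func main() {
	vs := []metav1.GroupVersionForDiscovery{{Version: "v1beta1"}}
	vs = insertVersion(vs, metav1.GroupVersionForDiscovery{Version: "v1"}, true)
	fmt.Println(vs[0].Version) // "v1": the preferred version now sorts first
}
```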
@@ -284,14 +292,14 @@ func (m *mapper) findAPIGroupByNameAndMaybeAggregatedDiscoveryLocked(groupName s
 	}
 
 	m.initialDiscoveryDone = true
-	if len(maybeResources) > 0 {
-		didAggregatedDiscovery = true
-		m.addGroupVersionResourcesToCacheAndReloadLocked(maybeResources)
-	}
 	for i := range apiGroups.Groups {
 		group := &apiGroups.Groups[i]
 		m.apiGroups[group.Name] = group
 	}
+	if len(maybeResources) > 0 {
+		didAggregatedDiscovery = true
+		m.addGroupVersionResourcesToCacheAndReloadLocked(maybeResources)
+	}
 
 	// Looking in the cache again.
 	// Don't return an error here if the API group is not present.
diff --git a/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md b/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..7566c879ce95385495046a90d515037851504873
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md
@@ -0,0 +1,43 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+<!---
+If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
+-->
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+<!---
+Custom Information - if you're copying this template for the first time you can add custom content here, for example:
+
+## Contact Information
+
+- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel. 
+- [Mailing list](URL)
+
+-->
+
+## Project Management
+
+The [maintainers](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES#L12) of this project (and often others who have official positions on the [contributor ladder](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES)) are responsible for performing project management which oversees development and maintenance of the API, tests, tools, etc. While we try to be generally flexible when it comes to the management of individual pieces (such as Issues or PRs), we have some rules and guidelines which help us plan, coordinate and reduce waste. In this section you'll find some rules/guidelines for contributors related to project management which may extend or go beyond what you would find in the standard [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide).
+
+### Bumping stale and closed Issues & PRs
+
+Maintainers are ultimately responsible for triaging new issues and PRs, accepting or declining them, deciding priority and fitting them into milestones intended for future releases. Bots are responsible for marking issues and PRs which stagnate as stale, or closing them if progress does not continue for a long period of time. Due to the nature of this community-driven development effort (we do not have dedicated engineering resources, we rely on the community which is effectively "volunteer time") **not all issues can be accepted, prioritized or completed**.
+
+You may find times when an issue you're subscribed to and interested in seems to stagnate, or perhaps gets auto-closed. Prior to bumping or directly re-opening issues yourself, we generally ask that you bring these up for discussion on the agenda for one of our community syncs if possible, or bring them up for discussion in Slack or the mailing list as this gives us a better opportunity to discuss the issue and determine viability and logistics. If feasible we **highly recommend being ready to contribute directly** to any stale or unprioritized effort that you want to see move forward, as **the best way to ensure progress is to engage with the community and personally invest time**.
+
+We (the community) aren't opposed to making exceptions in some cases, but when in doubt please follow the above guidelines before bumping closed or stale issues if you're not ready to personally invest time in them. We are responsible for managing these and without further context or engagement we may set these back to how they were previously organized.
diff --git a/vendor/sigs.k8s.io/randfill/LICENSE b/vendor/sigs.k8s.io/randfill/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..9dd29274c3d4d94c4a44726f15064d4ae600d26a
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/LICENSE
@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2014 The gofuzz Authors
+   Copyright 2025 The Kubernetes Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/sigs.k8s.io/randfill/NOTICE b/vendor/sigs.k8s.io/randfill/NOTICE
new file mode 100644
index 0000000000000000000000000000000000000000..6984e71f654ef86f1b12b5f4b298cad16ddb22e3
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/NOTICE
@@ -0,0 +1,24 @@
+When donating the randfill project to the CNCF, we could not reach all the
+gofuzz contributors to sign the CNCF CLA. As such, according to the CNCF rules
+to donate a repository, we must add a NOTICE referencing section 7 of the CLA
+with a list of developers who could not be reached.
+
+`7. Should You wish to submit work that is not Your original creation, You may
+submit it to the Foundation separately from any Contribution, identifying the
+complete details of its source and of any license or other restriction
+(including, but not limited to, related patents, trademarks, and license
+agreements) of which you are personally aware, and conspicuously marking the
+work as "Submitted on behalf of a third-party: [named here]".`
+
+Submitted on behalf of a third-party: @dnephin (Daniel Nephin)
+Submitted on behalf of a third-party: @AlekSi (Alexey Palazhchenko)
+Submitted on behalf of a third-party: @bbigras (Bruno Bigras)
+Submitted on behalf of a third-party: @samirkut (Samir)
+Submitted on behalf of a third-party: @posener (Eyal Posener)
+Submitted on behalf of a third-party: @Ashikpaul (Ashik Paul)
+Submitted on behalf of a third-party: @kwongtailau (Kwongtai)
+Submitted on behalf of a third-party: @ericcornelissen (Eric Cornelissen)
+Submitted on behalf of a third-party: @eclipseo (Robert-André Mauchin)
+Submitted on behalf of a third-party: @yanzhoupan (Andrew Pan)
+Submitted on behalf of a third-party: @STRRL (Zhiqiang ZHOU)
+Submitted on behalf of a third-party: @disconnect3d (Disconnect3d)
diff --git a/vendor/sigs.k8s.io/randfill/OWNERS b/vendor/sigs.k8s.io/randfill/OWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..59f6a50f6b6f90e7cad210699d1cc8db93f0f6cf
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+# See the OWNERS_ALIASES file at https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES for a list of members for each alias.
+
+approvers:
+  - sig-testing-leads
+  - thockin
+
+reviewers: []
diff --git a/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES
new file mode 100644
index 0000000000000000000000000000000000000000..927f1209b1d18cd749849fc3e422c7652ff02d3e
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES
@@ -0,0 +1,14 @@
+# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+# This file should be kept in sync with k/org.
+
+aliases:
+  # Reference: https://github.com/kubernetes/org/blob/main/OWNERS_ALIASES
+  sig-testing-leads:
+    - BenTheElder
+    - alvaroaleman
+    - aojea
+    - cjwagner
+    - jbpratt
+    - michelle192837
+    - pohly
+    - xmcqueen
diff --git a/vendor/sigs.k8s.io/randfill/README.md b/vendor/sigs.k8s.io/randfill/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d892fc9f5d5ee42d15cd05fd941653fb6a84ef16
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/README.md
@@ -0,0 +1,98 @@
+randfill
+======
+
+randfill is a library for populating Go objects with random values.
+
+This is a fork of github.com/google/gofuzz, which was archived.
+
+NOTE: This repo is supported only for use within Kubernetes.  It is not our
+intention to support general use.  That said, if it works for you, that's
+great!  If you have a problem, please feel free to file an issue, but be aware
+that it may not be a priority for us to fix it unless it is affecting
+Kubernetes.  PRs are welcome, within reason.
+
+[![GoDoc](https://godoc.org/sigs.k8s.io/randfill?status.svg)](https://godoc.org/sigs.k8s.io/randfill)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/deserialize correctly in all cases?
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "sigs.k8s.io/randfill"```
+
+You can use it on single variables:
+```go
+f := randfill.New()
+var myInt int
+f.Fill(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```go
+f := randfill.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fill(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```go
+f := randfill.New().NilChance(.5)
+var fancyStruct struct {
+  A, B, C, D *string
+}
+f.Fill(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```go
+type MyEnum string
+const (
+        A MyEnum = "A"
+        B MyEnum = "B"
+)
+type MyInfo struct {
+        Type MyEnum
+        AInfo *string
+        BInfo *string
+}
+
+f := randfill.New().NilChance(0).Funcs(
+        func(e *MyInfo, c randfill.Continue) {
+                switch c.Intn(2) {
+                case 0:
+                        e.Type = A
+                        c.Fill(&e.AInfo)
+                case 1:
+                        e.Type = B
+                        c.Fill(&e.BInfo)
+                }
+        },
+)
+
+var myObject MyInfo
+f.Fill(&myObject) // Type will correspond to whether A or B info is set.
+```
+
+See more examples in ```example_test.go```.
+
+## dvyukov/go-fuzz integration
+
+You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
+go-fuzz provides the user a byte slice, which should be converted into different inputs
+for the function under test. This library can help convert the byte slice. Consider, for
+example, a fuzz test for the function `mypackage.MyFunc`, which takes an int argument:
+```go
+// +build gofuzz
+
+package mypackage
+
+import "sigs.k8s.io/randfill"
+
+func Fuzz(data []byte) int {
+        var i int
+        randfill.NewFromGoFuzz(data).Fill(&i)
+        MyFunc(i)
+        return 0
+}
+```
+
+Happy testing!
diff --git a/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS b/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS
new file mode 100644
index 0000000000000000000000000000000000000000..91d78533783084a0ac0d827a30f8fbfd8ba6c8f7
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS
@@ -0,0 +1,16 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+thockin
+BenTheElder
+aojea
+pohly
diff --git a/vendor/sigs.k8s.io/randfill/bytesource/bytesource.go b/vendor/sigs.k8s.io/randfill/bytesource/bytesource.go
new file mode 100644
index 0000000000000000000000000000000000000000..5bb365949691eb734286d4b8b3e2ff78acaf5d86
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/bytesource/bytesource.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package bytesource provides a rand.Source64 that is determined by a slice of bytes.
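+//
+// For example (illustrative), it can seed a math/rand generator deterministically:
+//
+//	r := rand.New(bytesource.New([]byte{1, 2, 3, 4, 5, 6, 7, 8}))
+//	n := r.Uint64() // derived deterministically from the input bytes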
+package bytesource
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"math/rand"
+)
+
+// ByteSource implements rand.Source64 determined by a slice of bytes. Random numbers are
+// generated from successive 8-byte chunks of the slice; once the input is consumed, a
+// fallback pseudo-random source (seeded from the input) supplies any further values.
+// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly.
+type ByteSource struct {
+	*bytes.Reader
+	fallback rand.Source
+}
+
+// New returns a new ByteSource from a given slice of bytes.
+func New(input []byte) *ByteSource {
+	s := &ByteSource{
+		Reader:   bytes.NewReader(input),
+		fallback: rand.NewSource(0),
+	}
+	if len(input) > 0 {
+		s.fallback = rand.NewSource(int64(s.consumeUint64()))
+	}
+	return s
+}
+
+func (s *ByteSource) Uint64() uint64 {
+	// Return from input if it was not exhausted.
+	if s.Len() > 0 {
+		return s.consumeUint64()
+	}
+
+	// The input was exhausted, so return a random number from the fallback (which should
+	// not be nil at this point). Prefer the fallback's Uint64 output when available (it is
+	// in the current rand implementation); otherwise derive the value from Int63.
+	if s64, ok := s.fallback.(rand.Source64); ok {
+		return s64.Uint64()
+	}
+	return uint64(s.fallback.Int63())
+}
+
+func (s *ByteSource) Int63() int64 {
+	return int64(s.Uint64() >> 1)
+}
+
+func (s *ByteSource) Seed(seed int64) {
+	s.fallback = rand.NewSource(seed)
+	s.Reader = bytes.NewReader(nil)
+}
+
+// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that
+// the bytes reader is not empty.
+func (s *ByteSource) consumeUint64() uint64 {
+	var bytes [8]byte
+	_, err := s.Read(bytes[:])
+	if err != nil && err != io.EOF {
+		panic("failed reading source") // Should not happen.
+	}
+	return binary.BigEndian.Uint64(bytes[:])
+}
diff --git a/vendor/sigs.k8s.io/randfill/code-of-conduct.md b/vendor/sigs.k8s.io/randfill/code-of-conduct.md
new file mode 100644
index 0000000000000000000000000000000000000000..0d15c00cf32529a51f1db0697ab8b0b0669fdc13
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/sigs.k8s.io/randfill/randfill.go b/vendor/sigs.k8s.io/randfill/randfill.go
new file mode 100644
index 0000000000000000000000000000000000000000..b73482484409ba7b5af0b495ac26ca272637836e
--- /dev/null
+++ b/vendor/sigs.k8s.io/randfill/randfill.go
@@ -0,0 +1,682 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+Copyright 2014 The gofuzz Authors.
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package randfill is a library for populating Go objects with random values.
+package randfill
+
+import (
+	"fmt"
+	"math/rand"
+	"reflect"
+	"regexp"
+	"sync"
+	"time"
+	"unsafe"
+
+	"strings"
+
+	"sigs.k8s.io/randfill/bytesource"
+)
+
+// funcMap is a map from a type to a function that randfills that type.  The
+// function is a reflect.Value because the type being filled is different for
+// each func.
+type funcMap map[reflect.Type]reflect.Value
+
+// Filler knows how to fill any object with random fields.
+type Filler struct {
+	customFuncs           funcMap
+	defaultFuncs          funcMap
+	r                     *rand.Rand
+	nilChance             float64
+	minElements           int
+	maxElements           int
+	maxDepth              int
+	allowUnexportedFields bool
+	skipFieldPatterns     []*regexp.Regexp
+
+	lock sync.Mutex
+}
+
+// New returns a new Filler. Customize your Filler further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Filler {
+	return NewWithSeed(time.Now().UnixNano())
+}
+
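+// NewWithSeed returns a new Filler seeded with the given value, so that repeated
+// runs with the same seed produce the same sequence of fills.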
+func NewWithSeed(seed int64) *Filler {
+	f := &Filler{
+		defaultFuncs: funcMap{
+			reflect.TypeOf(&time.Time{}): reflect.ValueOf(randfillTime),
+		},
+
+		customFuncs:           funcMap{},
+		r:                     rand.New(rand.NewSource(seed)),
+		nilChance:             .2,
+		minElements:           1,
+		maxElements:           10,
+		maxDepth:              100,
+		allowUnexportedFields: false,
+	}
+	return f
+}
+
+// NewFromGoFuzz is a helper function that enables using randfill (this
+// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
+// fuzzing. Essentially, it enables translating the fuzzing bytes from
+// go-fuzz to any Go object using this library.
+//
+// This implementation promises a constant translation from a given slice of
+// bytes to the fuzzed objects. This promise will remain over future
+// versions of Go and of this library.
+//
+// Note: the returned Filler should not be shared between multiple goroutines,
+// as its deterministic output will no longer be available.
+//
+// Example: use go-fuzz to test the function `MyFunc(int)` in the package
+// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
+//
+// // +build gofuzz
+//
+// package mypackage
+// import "sigs.k8s.io/randfill"
+//
+//	func Fuzz(data []byte) int {
+//		var i int
+//		randfill.NewFromGoFuzz(data).Fill(&i)
+//		MyFunc(i)
+//		return 0
+//	}
+func NewFromGoFuzz(data []byte) *Filler {
+	return New().RandSource(bytesource.New(data))
+}
+
+// Funcs registers custom fill functions for this Filler.
+//
+// Each entry in customFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// randfill.Continue, which will provide a source of randomness and a way
+// to automatically continue filling smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// filling, the function `func(s *string, c randfill.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChance option. For slices, it
+// doesn't make much sense to pre-create them--Filler doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
+//
+// If a function is already registered for a type, and a new function is
+// provided, the previous function will be replaced with the new one.
+func (f *Filler) Funcs(customFuncs ...interface{}) *Filler {
+	for i := range customFuncs {
+		v := reflect.ValueOf(customFuncs[i])
+		if v.Kind() != reflect.Func {
+			panic("Filler.Funcs: all arguments must be functions")
+		}
+		t := v.Type()
+		if t.NumIn() != 2 || t.NumOut() != 0 {
+			panic("Filler.Funcs: all customFuncs must have 2 arguments and 0 returns")
+		}
+		argT := t.In(0)
+		switch argT.Kind() {
+		case reflect.Ptr, reflect.Map:
+		default:
+			panic("Filler.Funcs: customFuncs' first argument must be a pointer or map type")
+		}
+		if t.In(1) != reflect.TypeOf(Continue{}) {
+			panic("Filler.Funcs: customFuncs' second argument must be a randfill.Continue")
+		}
+		f.customFuncs[argT] = v
+	}
+	return f
+}
+
+// RandSource causes this Filler to get values from the given source of
+// randomness. Use this if you want deterministic filling.
+func (f *Filler) RandSource(s rand.Source) *Filler {
+	f.r = rand.New(s)
+	return f
+}
+
+// NilChance sets the probability of creating a nil pointer, map, or slice to
+// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
+func (f *Filler) NilChance(p float64) *Filler {
+	if p < 0 || p > 1 {
+		panic("Filler.NilChance: p must be between 0 and 1, inclusive")
+	}
+	f.nilChance = p
+	return f
+}
+
+// NumElements sets the minimum and maximum number of elements that will be
+// added to a non-nil map or slice.
+func (f *Filler) NumElements(min, max int) *Filler {
+	if min < 0 {
+		panic("Filler.NumElements: min must be >= 0")
+	}
+	if min > max {
+		panic("Filler.NumElements: min must be <= max")
+	}
+	f.minElements = min
+	f.maxElements = max
+	return f
+}
+
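+// genElementCount picks a random element count between minElements and maxElements, inclusive.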
+func (f *Filler) genElementCount() int {
+	if f.minElements == f.maxElements {
+		return f.minElements
+	}
+	return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
+}
+
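+// genShouldFill randomly reports whether a nil-able value should be filled, honoring nilChance.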
+func (f *Filler) genShouldFill() bool {
+	return f.r.Float64() >= f.nilChance
+}
+
+// MaxDepth sets the maximum number of recursive fill calls that will be made
+// before stopping.  This includes struct members, pointers, and map and slice
+// elements.
+func (f *Filler) MaxDepth(d int) *Filler {
+	f.maxDepth = d
+	return f
+}
+
+// AllowUnexportedFields defines whether to fill unexported fields.
+func (f *Filler) AllowUnexportedFields(flag bool) *Filler {
+	f.allowUnexportedFields = flag
+	return f
+}
+
+// SkipFieldsWithPattern tells this Filler to skip any field whose name matches
+// the supplied pattern. Call this multiple times if needed. This is useful to
+// skip XXX_ fields generated by protobuf.
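+//
+// For example (illustrative):
+//
+//	f := randfill.New().SkipFieldsWithPattern(regexp.MustCompile(`^XXX_`))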
+func (f *Filler) SkipFieldsWithPattern(pattern *regexp.Regexp) *Filler {
+	f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
+	return f
+}
+
+// SimpleSelfFiller represents an object that knows how to randfill itself.
+//
+// Unlike NativeSelfFiller, this interface does not cause the type in question
+// to depend on the randfill package.  This is most useful for simple types.  For
+// more complex types, consider using NativeSelfFiller.
+type SimpleSelfFiller interface {
+	// RandFill fills the current object with random data.
+	RandFill(r *rand.Rand)
+}
+
+// NativeSelfFiller represents an object that knows how to randfill itself.
+//
+// Unlike SimpleSelfFiller, this interface allows for recursive filling of
+// child objects with the same rules as the parent Filler.
+type NativeSelfFiller interface {
+	// RandFill fills the current object with random data.
+	RandFill(c Continue)
+}
+
+// Fill recursively fills all of obj's fields with something random.  First
+// this tries to find a custom fill function (see Funcs).  If there is no
+// custom function, this tests whether the object implements SimpleSelfFiller
+// or NativeSelfFiller and if so, calls RandFill on it to fill itself.  If that
+// fails, this will see if there is a default fill function provided by this
+// package. If all of that fails, this will generate random values for all
+// primitive fields and then recurse for all non-primitives.
+//
+// This is safe for cyclic or tree-like structs, up to a limit.  Use the
+// MaxDepth method to adjust how deep you need it to recurse.
+//
+// obj must be a pointer. Exported (public) fields can always be set, and if
+// the AllowUnexportedFields() modifier was called it can try to set unexported
+// (private) fields, too.
+//
+// This is intended for tests, so will panic on bad input or unimplemented
+// types.  This method takes a lock for the whole Filler, so it is not
+// reentrant.  See Continue.
+func (f *Filler) Fill(obj interface{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("Filler.Fill: obj must be a pointer")
+	}
+	v = v.Elem()
+	f.fillWithContext(v, 0)
+}
+
+// FillNoCustom is just like Fill, except that any custom fill function for
+// obj's type will not be called and obj will not be tested for
+// SimpleSelfFiller or NativeSelfFiller. This applies only to obj and not other
+// instances of obj's type or to obj's child fields.
+//
+// obj must be a pointer. Exported (public) fields can always be set, and if
+// the AllowUnexportedFields() modifier was called it can try to set unexported
+// (private) fields, too.
+//
+// This is intended for tests, so will panic on bad input or unimplemented
+// types.  This method takes a lock for the whole Filler, so it is not
+// reentrant.  See Continue.
+func (f *Filler) FillNoCustom(obj interface{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("Filler.FillNoCustom: obj must be a pointer")
+	}
+	v = v.Elem()
+	f.fillWithContext(v, flagNoCustomFill)
+}
+
+const (
+	// Do not try to find a custom fill function.  Does not apply recursively.
+	flagNoCustomFill uint64 = 1 << iota
+)
+
+func (f *Filler) fillWithContext(v reflect.Value, flags uint64) {
+	fc := &fillerContext{filler: f}
+	fc.doFill(v, flags)
+}
+
+// fillerContext carries context about a single filling run, which lets Filler
+// be thread-safe.
+type fillerContext struct {
+	filler   *Filler
+	curDepth int
+}
+
+func (fc *fillerContext) doFill(v reflect.Value, flags uint64) {
+	if fc.curDepth >= fc.filler.maxDepth {
+		return
+	}
+	fc.curDepth++
+	defer func() { fc.curDepth-- }()
+
+	if !v.CanSet() {
+		if !fc.filler.allowUnexportedFields || !v.CanAddr() {
+			return
+		}
+		v = reflect.NewAt(v.Type(), unsafe.Pointer(v.UnsafeAddr())).Elem()
+	}
+
+	if flags&flagNoCustomFill == 0 {
+		// Check for both pointer and non-pointer custom functions.
+		if v.CanAddr() && fc.tryCustom(v.Addr()) {
+			return
+		}
+		if fc.tryCustom(v) {
+			return
+		}
+	}
+
+	if fn, ok := fillFuncMap[v.Kind()]; ok {
+		fn(v, fc.filler.r)
+		return
+	}
+
+	switch v.Kind() {
+	case reflect.Map:
+		if fc.filler.genShouldFill() {
+			v.Set(reflect.MakeMap(v.Type()))
+			n := fc.filler.genElementCount()
+			for i := 0; i < n; i++ {
+				key := reflect.New(v.Type().Key()).Elem()
+				fc.doFill(key, 0)
+				val := reflect.New(v.Type().Elem()).Elem()
+				fc.doFill(val, 0)
+				v.SetMapIndex(key, val)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Ptr:
+		if fc.filler.genShouldFill() {
+			v.Set(reflect.New(v.Type().Elem()))
+			fc.doFill(v.Elem(), 0)
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Slice:
+		if fc.filler.genShouldFill() {
+			n := fc.filler.genElementCount()
+			v.Set(reflect.MakeSlice(v.Type(), n, n))
+			for i := 0; i < n; i++ {
+				fc.doFill(v.Index(i), 0)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Array:
+		if fc.filler.genShouldFill() {
+			n := v.Len()
+			for i := 0; i < n; i++ {
+				fc.doFill(v.Index(i), 0)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			skipField := false
+			fieldName := v.Type().Field(i).Name
+			for _, pattern := range fc.filler.skipFieldPatterns {
+				if pattern.MatchString(fieldName) {
+					skipField = true
+					break
+				}
+			}
+			if !skipField {
+				fc.doFill(v.Field(i), 0)
+			}
+		}
+	case reflect.Chan:
+		fallthrough
+	case reflect.Func:
+		fallthrough
+	case reflect.Interface:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("can't fill type %v, kind %v", v.Type(), v.Kind()))
+	}
+}
+
+// tryCustom searches for custom handlers, and returns true iff it finds a match
+// and successfully randomizes v.
+func (fc *fillerContext) tryCustom(v reflect.Value) bool {
+	// First: see if we have a fill function for it.
+	doCustom, ok := fc.filler.customFuncs[v.Type()]
+	if !ok {
+		// Second: see if it can fill itself.
+		if v.CanInterface() {
+			intf := v.Interface()
+			if fillable, ok := intf.(SimpleSelfFiller); ok {
+				fillable.RandFill(fc.filler.r)
+				return true
+			}
+			if fillable, ok := intf.(NativeSelfFiller); ok {
+				fillable.RandFill(Continue{fc: fc, Rand: fc.filler.r})
+				return true
+			}
+		}
+		// Finally: see if there is a default fill function.
+		doCustom, ok = fc.filler.defaultFuncs[v.Type()]
+		if !ok {
+			return false
+		}
+	}
+
+	switch v.Kind() {
+	case reflect.Ptr:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return false
+			}
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+	case reflect.Map:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return false
+			}
+			v.Set(reflect.MakeMap(v.Type()))
+		}
+	default:
+		return false
+	}
+
+	doCustom.Call([]reflect.Value{
+		v,
+		reflect.ValueOf(Continue{
+			fc:   fc,
+			Rand: fc.filler.r,
+		}),
+	})
+	return true
+}
+
+// Continue can be passed to custom fill functions to allow them to use
+// the correct source of randomness and to continue filling their members.
+type Continue struct {
+	fc *fillerContext
+
+	// For convenience, Continue implements rand.Rand via embedding.
+	// Use this for generating any randomness if you want your filling
+	// to be repeatable for a given seed.
+	*rand.Rand
+}
+
+// Fill continues filling obj. obj must be a pointer or a reflect.Value of a
+// pointer.  See Filler.Fill.
+func (c Continue) Fill(obj interface{}) {
+	v, ok := obj.(reflect.Value)
+	if !ok {
+		v = reflect.ValueOf(obj)
+	}
+	if v.Kind() != reflect.Ptr {
+		panic("Continue.Fill: obj must be a pointer")
+	}
+	v = v.Elem()
+	c.fc.doFill(v, 0)
+}
+
+// FillNoCustom continues filling obj, except that any custom fill function for
+// obj's type will not be called and obj will not be tested for
+// SimpleSelfFiller or NativeSelfFiller.  See Filler.FillNoCustom.
+func (c Continue) FillNoCustom(obj interface{}) {
+	v, ok := obj.(reflect.Value)
+	if !ok {
+		v = reflect.ValueOf(obj)
+	}
+	if v.Kind() != reflect.Ptr {
+		panic("Continue.FillNoCustom: obj must be a pointer")
+	}
+	v = v.Elem()
+	c.fc.doFill(v, flagNoCustomFill)
+}
+
+const defaultStringMaxLen = 20
+
+// String makes a random string up to n characters long. If n is 0, the default
+// size range is [0-20). The returned string may include a variety of (valid)
+// UTF-8 encodings.
+func (c Continue) String(n int) string {
+	return randString(c.Rand, n)
+}
+
+// Uint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func (c Continue) Uint64() uint64 {
+	return randUint64(c.Rand)
+}
+
+// Bool returns true or false randomly.
+func (c Continue) Bool() bool {
+	return randBool(c.Rand)
+}
+
+func fillInt(v reflect.Value, r *rand.Rand) {
+	v.SetInt(int64(randUint64(r)))
+}
+
+func fillUint(v reflect.Value, r *rand.Rand) {
+	v.SetUint(randUint64(r))
+}
+
+func randfillTime(t *time.Time, c Continue) {
+	var sec, nsec int64
+	// Allow for about 1000 years of random time values, which keeps things
+	// like JSON parsing reasonably happy.
+	sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+	// Nanosecond values greater than 1Bn are technically allowed but result in
+	// time.Time values with invalid timezone offsets.
+	nsec = c.Rand.Int63n(999999999)
+	*t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+		v.SetBool(randBool(r))
+	},
+	reflect.Int:     fillInt,
+	reflect.Int8:    fillInt,
+	reflect.Int16:   fillInt,
+	reflect.Int32:   fillInt,
+	reflect.Int64:   fillInt,
+	reflect.Uint:    fillUint,
+	reflect.Uint8:   fillUint,
+	reflect.Uint16:  fillUint,
+	reflect.Uint32:  fillUint,
+	reflect.Uint64:  fillUint,
+	reflect.Uintptr: fillUint,
+	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(float64(r.Float32()))
+	},
+	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(r.Float64())
+	},
+	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+		v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
+	},
+	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+		v.SetComplex(complex(r.Float64(), r.Float64()))
+	},
+	reflect.String: func(v reflect.Value, r *rand.Rand) {
+		v.SetString(randString(r, 0))
+	},
+	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+		panic("filling of UnsafePointers is not implemented")
+	},
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+	return r.Int31()&(1<<30) == 0
+}
+
+type int63nPicker interface {
+	Int63n(int64) int64
+}
+
+// UnicodeRange describes a sequential range of unicode characters.
+// Last must be numerically greater than or equal to First.
+type UnicodeRange struct {
+	First, Last rune
+}
+
+// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
+// To be useful, each range must have at least one character (First <= Last) and
+// there must be at least one range.
+type UnicodeRanges []UnicodeRange
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (ur UnicodeRange) choose(r int63nPicker) rune {
+	count := int64(ur.Last - ur.First + 1)
+	return ur.First + rune(r.Int63n(count))
+}
+
+// CustomStringFillFunc constructs a FillFunc which produces random strings.
+// Each character is selected from the range ur. If there are no characters
+// in the range (ur.Last < ur.First), this will panic.
+func (ur UnicodeRange) CustomStringFillFunc(n int) func(s *string, c Continue) {
+	ur.check()
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand, n)
+	}
+}
+
+// check panics if the range is invalid, i.e. if ur.First is greater than
+// ur.Last.
+func (ur UnicodeRange) check() {
+	if ur.Last < ur.First {
+		panic("UnicodeRange.check: the last encoding must be greater than the first")
+	}
+}
+
+// randString makes a random string up to max characters long (20 by default
+// when max is 0). Each character is selected from the range ur.
+func (ur UnicodeRange) randString(r *rand.Rand, max int) string {
+	if max == 0 {
+		max = defaultStringMaxLen
+	}
+	n := r.Intn(max)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur.choose(r))
+	}
+	return sb.String()
+}
+
+// defaultUnicodeRanges is the default set of unicode ranges used to fill
+// strings when users do not provide a CustomStringFillFunc().
+var defaultUnicodeRanges = UnicodeRanges{
+	{' ', '~'},           // ASCII characters
+	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
+	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// CustomStringFillFunc constructs a FillFunc which produces random strings.
+// Each character is selected from one of the ranges of ur(UnicodeRanges).
+// Each range has an equal probability of being chosen. If there are no ranges,
+// or a selected range has no characters (.Last < .First), this will panic.
+// Do not modify any of the ranges in ur after calling this function.
+func (ur UnicodeRanges) CustomStringFillFunc(n int) func(s *string, c Continue) {
+	// Panic if the unicode ranges slice is empty.
+	if len(ur) == 0 {
+		panic("UnicodeRanges is empty")
+	}
+	// If not empty, check each range.
+	for i := range ur {
+		ur[i].check()
+	}
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand, n)
+	}
+}
+
+// randString makes a random string up to max characters long (20 by default
+// when max is 0). Each character is selected from one of the ranges of ur,
+// and each range has an equal probability of being chosen.
+func (ur UnicodeRanges) randString(r *rand.Rand, max int) string {
+	if max == 0 {
+		max = defaultStringMaxLen
+	}
+	n := r.Intn(max)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
+	}
+	return sb.String()
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand, max int) string {
+	return defaultUnicodeRanges.randString(r, max)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+	return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
index 34ab2d6fb4f06d6d26753bef22e753709f220e4b..455818ff85873f82a7dfaf482bbc92476dd08434 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
@@ -33,6 +33,9 @@ type UpdaterBuilder struct {
 	Converter    Converter
 	IgnoreFilter map[fieldpath.APIVersion]fieldpath.Filter
 
+	// IgnoredFields provides a set of fields to ignore for each version.
+	IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set
+
 	// Stop comparing the new object with old object after applying.
 	// This was initially used to avoid spurious etcd update, but
 	// since that's vastly inefficient, we've come-up with a better
@@ -46,6 +49,7 @@ func (u *UpdaterBuilder) BuildUpdater() *Updater {
 	return &Updater{
 		Converter:         u.Converter,
 		IgnoreFilter:      u.IgnoreFilter,
+		IgnoredFields:     u.IgnoredFields,
 		returnInputOnNoop: u.ReturnInputOnNoop,
 	}
 }
@@ -56,6 +60,9 @@ type Updater struct {
 	// Deprecated: This will eventually become private.
 	Converter Converter
 
+	// Deprecated: This will eventually become private.
+	IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set
+
 	// Deprecated: This will eventually become private.
 	IgnoreFilter map[fieldpath.APIVersion]fieldpath.Filter
 
@@ -70,8 +77,19 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa
 		return nil, nil, fmt.Errorf("failed to compare objects: %v", err)
 	}
 
-	versions := map[fieldpath.APIVersion]*typed.Comparison{
-		version: compare.FilterFields(s.IgnoreFilter[version]),
+	var versions map[fieldpath.APIVersion]*typed.Comparison
+
+	if s.IgnoredFields != nil && s.IgnoreFilter != nil {
+		return nil, nil, fmt.Errorf("IgnoredFields and IgnoreFilter may not both be set")
+	}
+	if s.IgnoredFields != nil {
+		versions = map[fieldpath.APIVersion]*typed.Comparison{
+			version: compare.ExcludeFields(s.IgnoredFields[version]),
+		}
+	} else {
+		versions = map[fieldpath.APIVersion]*typed.Comparison{
+			version: compare.FilterFields(s.IgnoreFilter[version]),
+		}
 	}
 
 	for manager, managerSet := range managers {
@@ -101,7 +119,12 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa
 			if err != nil {
 				return nil, nil, fmt.Errorf("failed to compare objects: %v", err)
 			}
-			versions[managerSet.APIVersion()] = compare.FilterFields(s.IgnoreFilter[managerSet.APIVersion()])
+
+			if s.IgnoredFields != nil {
+				versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()])
+			} else {
+				versions[managerSet.APIVersion()] = compare.FilterFields(s.IgnoreFilter[managerSet.APIVersion()])
+			}
 		}
 
 		conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added))
@@ -154,7 +177,16 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp
 		managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false)
 	}
 	set := managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added)
-	ignoreFilter := s.IgnoreFilter[version]
+
+	if s.IgnoredFields != nil && s.IgnoreFilter != nil {
+		return nil, nil, fmt.Errorf("IgnoredFields and IgnoreFilter may not both be set")
+	}
+	var ignoreFilter fieldpath.Filter
+	if s.IgnoredFields != nil {
+		ignoreFilter = fieldpath.NewExcludeSetFilter(s.IgnoredFields[version])
+	} else {
+		ignoreFilter = s.IgnoreFilter[version]
+	}
 	if ignoreFilter != nil {
 		set = ignoreFilter.Filter(set)
 	}
@@ -189,7 +221,15 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel
 		return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err)
 	}
 
-	ignoreFilter := s.IgnoreFilter[version]
+	if s.IgnoredFields != nil && s.IgnoreFilter != nil {
+		return nil, nil, fmt.Errorf("IgnoredFields and IgnoreFilter may not both be set")
+	}
+	var ignoreFilter fieldpath.Filter
+	if s.IgnoredFields != nil {
+		ignoreFilter = fieldpath.NewExcludeSetFilter(s.IgnoredFields[version])
+	} else {
+		ignoreFilter = s.IgnoreFilter[version]
+	}
 	if ignoreFilter != nil {
 		set = ignoreFilter.Filter(set)
 	}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go
index 9be902828062eda4a3517db93e23824ea309ab00..7edaa6d4892000fabfa81174b532b3172f816068 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go
@@ -32,6 +32,21 @@ const (
 	AllowDuplicates ValidationOptions = iota
 )
 
+// extractItemsOptions is the options available when extracting items.
+type extractItemsOptions struct {
+	appendKeyFields bool
+}
+
+type ExtractItemsOption func(*extractItemsOptions)
+
+// WithAppendKeyFields configures ExtractItems to include key fields.
+// It is exported for use in configuring ExtractItems.
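+//
+// For example (illustrative), given a TypedValue tv and a *fieldpath.Set set:
+//
+//	extracted := tv.ExtractItems(set, WithAppendKeyFields())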
+func WithAppendKeyFields() ExtractItemsOption {
+	return func(opts *extractItemsOptions) {
+		opts.appendKeyFields = true
+	}
+}
+
 // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have
 // type 'typeName' in the schema. An error is returned if the v doesn't conform
 // to the schema.
@@ -187,7 +202,37 @@ func (tv TypedValue) RemoveItems(items *fieldpath.Set) *TypedValue {
 }
 
 // ExtractItems returns a value with only the provided list or map items extracted from the value.
-func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue {
+func (tv TypedValue) ExtractItems(items *fieldpath.Set, opts ...ExtractItemsOption) *TypedValue {
+	options := &extractItemsOptions{}
+	for _, opt := range opts {
+		opt(options)
+	}
+	if options.appendKeyFields {
+		tvPathSet, err := tv.ToFieldSet()
+		if err == nil {
+			keyFieldPathSet := fieldpath.NewSet()
+			items.Iterate(func(path fieldpath.Path) {
+				if !tvPathSet.Has(path) {
+					return
+				}
+				for i, pe := range path {
+					if pe.Key == nil {
+						continue
+					}
+					for _, keyField := range *pe.Key {
+						keyName := keyField.Name
+						// Create a new slice with the same elements as path[:i+1], but set its capacity to len(path[:i+1]).
+						// This ensures that appending to keyFieldPath creates a new underlying array, avoiding accidental
+						// modification of the original slice (path).
+						keyFieldPath := append(path[:i+1:i+1], fieldpath.PathElement{FieldName: &keyName})
+						keyFieldPathSet.Insert(keyFieldPath)
+					}
+				}
+			})
+			items = items.Union(keyFieldPathSet)
+		}
+	}
+
 	tv.value = removeItemsWithSchema(tv.value, items, tv.schema, tv.typeRef, true)
 	return &tv
 }
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go
index 652e24c819a00ec30dd0e75b3de27634274b9bbd..c38234c5ab968670b1c1de5cf76b1f02e1b0d248 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go
@@ -157,7 +157,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List)
 func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) {
 	list, err := listValue(v.allocator, v.value)
 	if err != nil {
-		return errorf(err.Error())
+		return errorf("%v", err)
 	}
 
 	if list == nil {
@@ -193,7 +193,7 @@ func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs
 func (v *validatingObjectWalker) doMap(t *schema.Map) (errs ValidationErrors) {
 	m, err := mapValue(v.allocator, v.value)
 	if err != nil {
-		return errorf(err.Error())
+		return errorf("%v", err)
 	}
 	if m == nil {
 		return nil
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go
index d4adb8fc9d25d9415c03eb2c41c37370b0db356b..3aadceb2226280c8df65421d14d4bdc9da473859 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go
@@ -22,22 +22,77 @@ import (
 	"strings"
 )
 
+type isZeroer interface {
+	IsZero() bool
+}
+
+var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
+
+func reflectIsZero(dv reflect.Value) bool {
+	return dv.IsZero()
+}
+
+// OmitZeroFunc returns a function for a given struct field's type that
+// reports whether the field's value is a zero value, matching how the
+// stdlib JSON implementation handles the `omitzero` tag.
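+//
+// For example (illustrative), a struct field tagged `json:",omitzero"` whose
+// type provides an IsZero() bool method is omitted when IsZero returns true.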
+func OmitZeroFunc(t reflect.Type) func(reflect.Value) bool {
+	// Provide a function that uses a type's IsZero method.
+	// This matches the go 1.24 custom IsZero() implementation matching
+	switch {
+	case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
+		return func(v reflect.Value) bool {
+			// Avoid panics calling IsZero on a nil interface or
+			// non-nil interface with nil pointer.
+			return safeIsNil(v) ||
+				(v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) ||
+				v.Interface().(isZeroer).IsZero()
+		}
+	case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
+		return func(v reflect.Value) bool {
+			// Avoid panics calling IsZero on nil pointer.
+			return safeIsNil(v) || v.Interface().(isZeroer).IsZero()
+		}
+	case t.Implements(isZeroerType):
+		return func(v reflect.Value) bool {
+			return v.Interface().(isZeroer).IsZero()
+		}
+	case reflect.PointerTo(t).Implements(isZeroerType):
+		return func(v reflect.Value) bool {
+			if !v.CanAddr() {
+				// Temporarily box v so we can take the address.
+				v2 := reflect.New(v.Type()).Elem()
+				v2.Set(v)
+				v = v2
+			}
+			return v.Addr().Interface().(isZeroer).IsZero()
+		}
+	default:
+		// default to the reflect.IsZero implementation
+		return reflectIsZero
+	}
+}
+
 // TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236
 // but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go
 
-func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool) {
+func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool, omitzero func(reflect.Value) bool) {
 	tag := f.Tag.Get("json")
 	if tag == "-" {
-		return "", true, false, false
+		return "", true, false, false, nil
 	}
 	name, opts := parseTag(tag)
 	if name == "" {
 		name = f.Name
 	}
-	return name, false, opts.Contains("inline"), opts.Contains("omitempty")
+
+	if opts.Contains("omitzero") {
+		omitzero = OmitZeroFunc(f.Type)
+	}
+
+	return name, false, opts.Contains("inline"), opts.Contains("omitempty"), omitzero
 }
 
-func isZero(v reflect.Value) bool {
+func isEmpty(v reflect.Value) bool {
 	switch v.Kind() {
 	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
 		return v.Len() == 0
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go
index 88693b87e8fc5e929e38bb205eecdb21edfde8cc..3b4a402ee1ab8a3b39ac7e1e72da747fa18f1826 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go
@@ -59,6 +59,8 @@ type FieldCacheEntry struct {
 	JsonName string
 	// isOmitEmpty is true if the field has the json 'omitempty' tag.
 	isOmitEmpty bool
+	// omitzero is set if the field has the json 'omitzero' tag.
+	omitzero func(reflect.Value) bool
 	// fieldPath is a list of field indices (see FieldByIndex) to lookup the value of
 	// a field in a reflect.Value struct. The field indices in the list form a path used
 	// to traverse through intermediary 'inline' fields.
@@ -69,7 +71,13 @@ type FieldCacheEntry struct {
 }
 
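+// CanOmit reports whether the field's value may be omitted, honoring both the
+// json `omitempty` and `omitzero` tags.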
 func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool {
-	return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal))
+	if f.isOmitEmpty && (safeIsNil(fieldVal) || isEmpty(fieldVal)) {
+		return true
+	}
+	if f.omitzero != nil && f.omitzero(fieldVal) {
+		return true
+	}
+	return false
 }
 
 // GetFrom returns the field identified by this FieldCacheEntry from the provided struct.
@@ -147,7 +155,7 @@ func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCache
 func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) {
 	for i := 0; i < t.NumField(); i++ {
 		field := t.Field(i)
-		jsonName, omit, isInline, isOmitempty := lookupJsonTags(field)
+		jsonName, omit, isInline, isOmitempty, omitzero := lookupJsonTags(field)
 		if omit {
 			continue
 		}
@@ -161,7 +169,7 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi
 			}
 			continue
 		}
-		info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
+		info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, omitzero: omitzero, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
 		infos[jsonName] = info
 	}
 }
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go
index c78a4c18d122bccc813040ddaee0ff6acd8e3c2c..5824219e5136d85749df8d480c0fc9f081b24645 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go
@@ -43,7 +43,7 @@ func IntCompare(lhs, rhs int64) int {
 func BoolCompare(lhs, rhs bool) int {
 	if lhs == rhs {
 		return 0
-	} else if lhs == false {
+	} else if !lhs {
 		return -1
 	}
 	return 1